diff -Nur kernel-2.6.32.54/Documentation/scheduler/sched-cfs-hard-limits.txt kernel-2.6.32.54.vs/Documentation/scheduler/sched-cfs-hard-limits.txt
--- kernel-2.6.32.54/Documentation/scheduler/sched-cfs-hard-limits.txt	1970-01-01 01:00:00.000000000 +0100
+++ kernel-2.6.32.54.vs/Documentation/scheduler/sched-cfs-hard-limits.txt	2012-01-16 14:50:48.745524729 +0100
@@ -0,0 +1,48 @@
+CPU HARD LIMITS FOR CFS GROUPS
+==============================
+
+1. Overview
+2. Interface
+3. Examples
+
+1. Overview
+-----------
+
+CFS is a proportional share scheduler which tries to divide the CPU time
+proportionately between tasks or groups of tasks (task group/cgroup) depending
+on the priority/weight of the task or the shares assigned to groups of tasks.
+In CFS, a task/task group can get more than its share of CPU if there are
+enough idle CPU cycles available in the system, due to the work-conserving
+nature of the scheduler. However, in certain scenarios (like pay-per-use),
+it is desirable not to provide extra time to a group even in the presence
+of idle CPU cycles. This is where hard limiting can be of use.
+
+Hard limits for task groups can be set by specifying how much CPU runtime a
+group can consume within a given period. If the group consumes more CPU time
+than its runtime in a given period, it gets throttled. None of the tasks of
+the throttled group gets to run until the runtime of the group is refreshed
+at the beginning of the next period. For example, a group with a runtime of
+250 ms in a period of 500 ms is capped at 50% of one CPU, no matter how idle
+the rest of the system is.
+
+2. Interface
+------------
+
+The hard limit feature adds two cgroup files for the CFS group scheduler:
+
+cfs_runtime_us: Hard limit for the group in microseconds.
+
+cfs_period_us: Time period in microseconds within which the hard limit is
+enforced.
+
+A group gets created with default values for runtime (infinite runtime, which
+means hard limits are disabled) and period (0.5s). Each group can set its own
+values for runtime and period independently of other groups in the system.
+
+3. 
Examples +----------- + +# mount -t cgroup -ocpu none /cgroups/ +# cd /cgroups +# mkdir 1 +# cd 1/ +# echo 250000 > cfs_runtime_us /* set a 250ms runtime or limit */ +# echo 500000 > cfs_period_us /* set a 500ms period */ diff -Nur kernel-2.6.32.54/Documentation/vserver/debug.txt kernel-2.6.32.54.vs/Documentation/vserver/debug.txt --- kernel-2.6.32.54/Documentation/vserver/debug.txt 1970-01-01 01:00:00.000000000 +0100 +++ kernel-2.6.32.54.vs/Documentation/vserver/debug.txt 2012-01-16 14:50:48.745524729 +0100 @@ -0,0 +1,154 @@ + +debug_cvirt: + + 2 4 "vx_map_tgid: %p/%llx: %d -> %d" + "vx_rmap_tgid: %p/%llx: %d -> %d" + +debug_dlim: + + 0 1 "ALLOC (%p,#%d)%c inode (%d)" + "FREE (%p,#%d)%c inode" + 1 2 "ALLOC (%p,#%d)%c %lld bytes (%d)" + "FREE (%p,#%d)%c %lld bytes" + 2 4 "ADJUST: %lld,%lld on %ld,%ld [mult=%d]" + 3 8 "ext3_has_free_blocks(%p): %lu<%lu+1, %c, %u!=%u r=%d" + "ext3_has_free_blocks(%p): free=%lu, root=%lu" + "rcu_free_dl_info(%p)" + 4 10 "alloc_dl_info(%p,%d) = %p" + "dealloc_dl_info(%p)" + "get_dl_info(%p[#%d.%d])" + "put_dl_info(%p[#%d.%d])" + 5 20 "alloc_dl_info(%p,%d)*" + 6 40 "__hash_dl_info: %p[#%d]" + "__unhash_dl_info: %p[#%d]" + 7 80 "locate_dl_info(%p,#%d) = %p" + +debug_misc: + + 0 1 "destroy_dqhash: %p [#0x%08x] c=%d" + "new_dqhash: %p [#0x%08x]" + "vroot[%d]_clr_dev: dev=%p[%lu,%d:%d]" + "vroot[%d]_get_real_bdev: dev=%p[%lu,%d:%d]" + "vroot[%d]_set_dev: dev=%p[%lu,%d:%d]" + "vroot_get_real_bdev not set" + 1 2 "cow_break_link(»%s«)" + "temp copy »%s«" + 2 4 "dentry_open(new): %p" + "dentry_open(old): %p" + "lookup_create(new): %p" + "old path »%s«" + "path_lookup(old): %d" + "vfs_create(new): %d" + "vfs_rename: %d" + "vfs_sendfile: %d" + 3 8 "fput(new_file=%p[#%d])" + "fput(old_file=%p[#%d])" + 4 10 "vx_info_kill(%p[#%d],%d,%d) = %d" + "vx_info_kill(%p[#%d],%d,%d)*" + 5 20 "vs_reboot(%p[#%d],%d)" + 6 40 "dropping task %p[#%u,%u] for %p[#%u,%u]" + +debug_net: + + 2 4 "nx_addr_conflict(%p,%p) %d.%d,%d.%d" + 3 8 "inet_bind(%p) %d.%d.%d.%d, %d.%d.%d.%d, %d.%d.%d.%d" + "inet_bind(%p)* %p,%p;%lx %d.%d.%d.%d" + 4 10 "ip_route_connect(%p) %p,%p;%lx" + 5 20 "__addr_in_socket(%p,%d.%d.%d.%d) %p:%d.%d.%d.%d %p;%lx" + 6 40 "sk,egf: %p [#%d] (from %d)" + "sk,egn: %p [#%d] (from %d)" + "sk,req: %p [#%d] (from %d)" + "sk: %p [#%d] (from %d)" + "tw: %p [#%d] (from %d)" + 7 80 "__sock_recvmsg: %p[%p,%p,%p;%d]:%d/%d" + "__sock_sendmsg: %p[%p,%p,%p;%d]:%d/%d" + +debug_nid: + + 0 1 "__lookup_nx_info(#%u): %p[#%u]" + "alloc_nx_info(%d) = %p" + "create_nx_info(%d) (dynamic rejected)" + "create_nx_info(%d) = %p (already there)" + "create_nx_info(%d) = %p (new)" + "dealloc_nx_info(%p)" + 1 2 "alloc_nx_info(%d)*" + "create_nx_info(%d)*" + 2 4 "get_nx_info(%p[#%d.%d])" + "put_nx_info(%p[#%d.%d])" + 3 8 "claim_nx_info(%p[#%d.%d.%d]) %p" + "clr_nx_info(%p[#%d.%d])" + "init_nx_info(%p[#%d.%d])" + "release_nx_info(%p[#%d.%d.%d]) %p" + "set_nx_info(%p[#%d.%d])" + 4 10 "__hash_nx_info: %p[#%d]" + "__nx_dynamic_id: [#%d]" + "__unhash_nx_info: %p[#%d.%d.%d]" + 5 20 "moved task %p into nxi:%p[#%d]" + "nx_migrate_task(%p,%p[#%d.%d.%d])" + "task_get_nx_info(%p)" + 6 40 "nx_clear_persistent(%p[#%d])" + +debug_quota: + + 0 1 "quota_sync_dqh(%p,%d) discard inode %p" + 1 2 "quota_sync_dqh(%p,%d)" + "sync_dquots(%p,%d)" + "sync_dquots_dqh(%p,%d)" + 3 8 "do_quotactl(%p,%d,cmd=%d,id=%d,%p)" + +debug_switch: + + 0 1 "vc: VCMD_%02d_%d[%d], %d,%p [%d,%d,%x,%x]" + 1 2 "vc: VCMD_%02d_%d[%d] = %08lx(%ld) [%d,%d]" + 4 10 "%s: (%s %s) returned %s with %d" + +debug_tag: + + 7 80 "dx_parse_tag(»%s«): %d:#%d" + 
"dx_propagate_tag(%p[#%lu.%d]): %d,%d" + +debug_xid: + + 0 1 "__lookup_vx_info(#%u): %p[#%u]" + "alloc_vx_info(%d) = %p" + "alloc_vx_info(%d)*" + "create_vx_info(%d) (dynamic rejected)" + "create_vx_info(%d) = %p (already there)" + "create_vx_info(%d) = %p (new)" + "dealloc_vx_info(%p)" + "loc_vx_info(%d) = %p (found)" + "loc_vx_info(%d) = %p (new)" + "loc_vx_info(%d) = %p (not available)" + 1 2 "create_vx_info(%d)*" + "loc_vx_info(%d)*" + 2 4 "get_vx_info(%p[#%d.%d])" + "put_vx_info(%p[#%d.%d])" + 3 8 "claim_vx_info(%p[#%d.%d.%d]) %p" + "clr_vx_info(%p[#%d.%d])" + "init_vx_info(%p[#%d.%d])" + "release_vx_info(%p[#%d.%d.%d]) %p" + "set_vx_info(%p[#%d.%d])" + 4 10 "__hash_vx_info: %p[#%d]" + "__unhash_vx_info: %p[#%d.%d.%d]" + "__vx_dynamic_id: [#%d]" + 5 20 "enter_vx_info(%p[#%d],%p) %p[#%d,%p]" + "leave_vx_info(%p[#%d,%p]) %p[#%d,%p]" + "moved task %p into vxi:%p[#%d]" + "task_get_vx_info(%p)" + "vx_migrate_task(%p,%p[#%d.%d])" + 6 40 "vx_clear_persistent(%p[#%d])" + "vx_exit_init(%p[#%d],%p[#%d,%d,%d])" + "vx_set_init(%p[#%d],%p[#%d,%d,%d])" + "vx_set_persistent(%p[#%d])" + "vx_set_reaper(%p[#%d],%p[#%d,%d])" + 7 80 "vx_child_reaper(%p[#%u,%u]) = %p[#%u,%u]" + + +debug_limit: + + n 2^n "vx_acc_cres[%5d,%s,%2d]: %5d%s" + "vx_cres_avail[%5d,%s,%2d]: %5ld > %5d + %5d" + + m 2^m "vx_acc_page[%5d,%s,%2d]: %5d%s" + "vx_acc_pages[%5d,%s,%2d]: %5d += %5d" + "vx_pages_avail[%5d,%s,%2d]: %5ld > %5d + %5d" diff -Nur kernel-2.6.32.54/linux-2.6.32/arch/alpha/Kconfig kernel-2.6.32.54.vs/linux-2.6.32/arch/alpha/Kconfig --- kernel-2.6.32.54/linux-2.6.32/arch/alpha/Kconfig 2009-12-03 04:51:21.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/arch/alpha/Kconfig 2012-01-16 14:51:21.689409499 +0100 @@ -674,6 +674,8 @@ depends on VGA_HOSE default y +source "kernel/vserver/Kconfig" + source "security/Kconfig" source "crypto/Kconfig" diff -Nur kernel-2.6.32.54/linux-2.6.32/arch/alpha/kernel/entry.S kernel-2.6.32.54.vs/linux-2.6.32/arch/alpha/kernel/entry.S --- kernel-2.6.32.54/linux-2.6.32/arch/alpha/kernel/entry.S 2009-12-03 04:51:21.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/arch/alpha/kernel/entry.S 2012-01-16 14:51:21.689409499 +0100 @@ -874,24 +874,15 @@ .globl sys_getxpid .ent sys_getxpid sys_getxpid: + lda $sp, -16($sp) + stq $26, 0($sp) .prologue 0 - ldq $2, TI_TASK($8) - /* See linux/kernel/timer.c sys_getppid for discussion - about this loop. 
*/ - ldq $3, TASK_GROUP_LEADER($2) - ldq $4, TASK_REAL_PARENT($3) - ldl $0, TASK_TGID($2) -1: ldl $1, TASK_TGID($4) -#ifdef CONFIG_SMP - mov $4, $5 - mb - ldq $3, TASK_GROUP_LEADER($2) - ldq $4, TASK_REAL_PARENT($3) - cmpeq $4, $5, $5 - beq $5, 1b -#endif - stq $1, 80($sp) + lda $16, 96($sp) + jsr $26, do_getxpid + ldq $26, 0($sp) + + lda $sp, 16($sp) ret .end sys_getxpid diff -Nur kernel-2.6.32.54/linux-2.6.32/arch/alpha/kernel/osf_sys.c kernel-2.6.32.54.vs/linux-2.6.32/arch/alpha/kernel/osf_sys.c --- kernel-2.6.32.54/linux-2.6.32/arch/alpha/kernel/osf_sys.c 2012-01-16 15:01:37.876554641 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/arch/alpha/kernel/osf_sys.c 2012-01-16 14:51:21.689409499 +0100 @@ -865,7 +865,7 @@ { if (tv) { struct timeval ktv; - do_gettimeofday(&ktv); + vx_gettimeofday(&ktv); if (put_tv32(tv, &ktv)) return -EFAULT; } diff -Nur kernel-2.6.32.54/linux-2.6.32/arch/alpha/kernel/ptrace.c kernel-2.6.32.54.vs/linux-2.6.32/arch/alpha/kernel/ptrace.c --- kernel-2.6.32.54/linux-2.6.32/arch/alpha/kernel/ptrace.c 2009-12-03 04:51:21.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/arch/alpha/kernel/ptrace.c 2012-01-16 14:51:21.693409485 +0100 @@ -14,6 +14,7 @@ #include #include #include +#include #include #include diff -Nur kernel-2.6.32.54/linux-2.6.32/arch/alpha/kernel/systbls.S kernel-2.6.32.54.vs/linux-2.6.32/arch/alpha/kernel/systbls.S --- kernel-2.6.32.54/linux-2.6.32/arch/alpha/kernel/systbls.S 2009-12-03 04:51:21.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/arch/alpha/kernel/systbls.S 2012-01-16 14:51:21.693409485 +0100 @@ -446,7 +446,7 @@ .quad sys_stat64 /* 425 */ .quad sys_lstat64 .quad sys_fstat64 - .quad sys_ni_syscall /* sys_vserver */ + .quad sys_vserver /* sys_vserver */ .quad sys_ni_syscall /* sys_mbind */ .quad sys_ni_syscall /* sys_get_mempolicy */ .quad sys_ni_syscall /* sys_set_mempolicy */ diff -Nur kernel-2.6.32.54/linux-2.6.32/arch/alpha/kernel/traps.c kernel-2.6.32.54.vs/linux-2.6.32/arch/alpha/kernel/traps.c --- kernel-2.6.32.54/linux-2.6.32/arch/alpha/kernel/traps.c 2009-12-03 04:51:21.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/arch/alpha/kernel/traps.c 2012-01-16 14:51:21.693409485 +0100 @@ -183,7 +183,8 @@ #ifdef CONFIG_SMP printk("CPU %d ", hard_smp_processor_id()); #endif - printk("%s(%d): %s %ld\n", current->comm, task_pid_nr(current), str, err); + printk("%s(%d[#%u]): %s %ld\n", current->comm, + task_pid_nr(current), current->xid, str, err); dik_show_regs(regs, r9_15); add_taint(TAINT_DIE); dik_show_trace((unsigned long *)(regs+1)); diff -Nur kernel-2.6.32.54/linux-2.6.32/arch/alpha/mm/fault.c kernel-2.6.32.54.vs/linux-2.6.32/arch/alpha/mm/fault.c --- kernel-2.6.32.54/linux-2.6.32/arch/alpha/mm/fault.c 2009-12-03 04:51:21.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/arch/alpha/mm/fault.c 2012-01-16 14:51:21.693409485 +0100 @@ -193,8 +193,8 @@ down_read(&mm->mmap_sem); goto survive; } - printk(KERN_ALERT "VM: killing process %s(%d)\n", - current->comm, task_pid_nr(current)); + printk(KERN_ALERT "VM: killing process %s(%d:#%u)\n", + current->comm, task_pid_nr(current), current->xid); if (!user_mode(regs)) goto no_context; do_group_exit(SIGKILL); diff -Nur kernel-2.6.32.54/linux-2.6.32/arch/arm/include/asm/tlb.h kernel-2.6.32.54.vs/linux-2.6.32/arch/arm/include/asm/tlb.h --- kernel-2.6.32.54/linux-2.6.32/arch/arm/include/asm/tlb.h 2009-12-03 04:51:21.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/arch/arm/include/asm/tlb.h 2012-01-16 14:51:21.693409485 +0100 @@ -27,6 +27,7 @@ #else /* !CONFIG_MMU */ +#include #include 
/* diff -Nur kernel-2.6.32.54/linux-2.6.32/arch/arm/Kconfig kernel-2.6.32.54.vs/linux-2.6.32/arch/arm/Kconfig --- kernel-2.6.32.54/linux-2.6.32/arch/arm/Kconfig 2012-01-16 15:01:37.880556627 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/arch/arm/Kconfig 2012-01-16 14:51:21.693409485 +0100 @@ -1536,6 +1536,8 @@ source "arch/arm/Kconfig.debug" +source "kernel/vserver/Kconfig" + source "security/Kconfig" source "crypto/Kconfig" diff -Nur kernel-2.6.32.54/linux-2.6.32/arch/arm/kernel/calls.S kernel-2.6.32.54.vs/linux-2.6.32/arch/arm/kernel/calls.S --- kernel-2.6.32.54/linux-2.6.32/arch/arm/kernel/calls.S 2012-01-16 15:01:37.880556627 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/arch/arm/kernel/calls.S 2012-01-16 14:51:21.693409485 +0100 @@ -322,7 +322,7 @@ /* 310 */ CALL(sys_request_key) CALL(sys_keyctl) CALL(ABI(sys_semtimedop, sys_oabi_semtimedop)) -/* vserver */ CALL(sys_ni_syscall) + CALL(sys_vserver) CALL(sys_ioprio_set) /* 315 */ CALL(sys_ioprio_get) CALL(sys_inotify_init) diff -Nur kernel-2.6.32.54/linux-2.6.32/arch/arm/kernel/process.c kernel-2.6.32.54.vs/linux-2.6.32/arch/arm/kernel/process.c --- kernel-2.6.32.54/linux-2.6.32/arch/arm/kernel/process.c 2012-01-16 15:01:37.880556627 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/arch/arm/kernel/process.c 2012-01-16 14:51:21.693409485 +0100 @@ -272,7 +272,8 @@ void show_regs(struct pt_regs * regs) { printk("\n"); - printk("Pid: %d, comm: %20s\n", task_pid_nr(current), current->comm); + printk("Pid: %d[#%u], comm: %20s\n", + task_pid_nr(current), current->xid, current->comm); __show_regs(regs); __backtrace(); } diff -Nur kernel-2.6.32.54/linux-2.6.32/arch/arm/kernel/traps.c kernel-2.6.32.54.vs/linux-2.6.32/arch/arm/kernel/traps.c --- kernel-2.6.32.54/linux-2.6.32/arch/arm/kernel/traps.c 2009-12-03 04:51:21.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/arch/arm/kernel/traps.c 2012-01-16 14:51:21.693409485 +0100 @@ -234,8 +234,8 @@ sysfs_printk_last_file(); print_modules(); __show_regs(regs); - printk(KERN_EMERG "Process %.*s (pid: %d, stack limit = 0x%p)\n", - TASK_COMM_LEN, tsk->comm, task_pid_nr(tsk), thread + 1); + printk(KERN_EMERG "Process %.*s (pid: %d:#%u, stack limit = 0x%p)\n", + TASK_COMM_LEN, tsk->comm, task_pid_nr(tsk), tsk->xid, thread + 1); if (!user_mode(regs) || in_interrupt()) { dump_mem(KERN_EMERG, "Stack: ", regs->ARM_sp, diff -Nur kernel-2.6.32.54/linux-2.6.32/arch/avr32/mm/fault.c kernel-2.6.32.54.vs/linux-2.6.32/arch/avr32/mm/fault.c --- kernel-2.6.32.54/linux-2.6.32/arch/avr32/mm/fault.c 2009-12-03 04:51:21.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/arch/avr32/mm/fault.c 2012-01-16 14:51:21.693409485 +0100 @@ -216,7 +216,8 @@ down_read(&mm->mmap_sem); goto survive; } - printk("VM: Killing process %s\n", tsk->comm); + printk("VM: Killing process %s(%d:#%u)\n", + tsk->comm, task_pid_nr(tsk), tsk->xid); if (user_mode(regs)) do_group_exit(SIGKILL); goto no_context; diff -Nur kernel-2.6.32.54/linux-2.6.32/arch/cris/Kconfig kernel-2.6.32.54.vs/linux-2.6.32/arch/cris/Kconfig --- kernel-2.6.32.54/linux-2.6.32/arch/cris/Kconfig 2009-12-03 04:51:21.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/arch/cris/Kconfig 2012-01-16 14:51:21.701409457 +0100 @@ -685,6 +685,8 @@ source "arch/cris/Kconfig.debug" +source "kernel/vserver/Kconfig" + source "security/Kconfig" source "crypto/Kconfig" diff -Nur kernel-2.6.32.54/linux-2.6.32/arch/cris/mm/fault.c kernel-2.6.32.54.vs/linux-2.6.32/arch/cris/mm/fault.c --- kernel-2.6.32.54/linux-2.6.32/arch/cris/mm/fault.c 2009-12-03 04:51:21.000000000 +0100 +++ 
kernel-2.6.32.54.vs/linux-2.6.32/arch/cris/mm/fault.c 2012-01-16 14:51:21.701409457 +0100 @@ -245,7 +245,8 @@ out_of_memory: up_read(&mm->mmap_sem); - printk("VM: killing process %s\n", tsk->comm); + printk("VM: killing process %s(%d:#%u)\n", + tsk->comm, task_pid_nr(tsk), tsk->xid); if (user_mode(regs)) do_exit(SIGKILL); goto no_context; diff -Nur kernel-2.6.32.54/linux-2.6.32/arch/frv/kernel/kernel_thread.S kernel-2.6.32.54.vs/linux-2.6.32/arch/frv/kernel/kernel_thread.S --- kernel-2.6.32.54/linux-2.6.32/arch/frv/kernel/kernel_thread.S 2009-12-03 04:51:21.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/arch/frv/kernel/kernel_thread.S 2012-01-16 14:51:21.701409457 +0100 @@ -37,7 +37,7 @@ # start by forking the current process, but with shared VM setlos.p #__NR_clone,gr7 ; syscall number - ori gr10,#CLONE_VM,gr8 ; first syscall arg [clone_flags] + ori gr10,#CLONE_KT,gr8 ; first syscall arg [clone_flags] sethi.p #0xe4e4,gr9 ; second syscall arg [newsp] setlo #0xe4e4,gr9 setlos.p #0,gr10 ; third syscall arg [parent_tidptr] diff -Nur kernel-2.6.32.54/linux-2.6.32/arch/frv/mm/fault.c kernel-2.6.32.54.vs/linux-2.6.32/arch/frv/mm/fault.c --- kernel-2.6.32.54/linux-2.6.32/arch/frv/mm/fault.c 2009-12-03 04:51:21.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/arch/frv/mm/fault.c 2012-01-16 14:51:21.701409457 +0100 @@ -257,7 +257,8 @@ */ out_of_memory: up_read(&mm->mmap_sem); - printk("VM: killing process %s\n", current->comm); + printk("VM: killing process %s(%d:#%u)\n", + current->comm, task_pid_nr(current), current->xid); if (user_mode(__frame)) do_group_exit(SIGKILL); goto no_context; diff -Nur kernel-2.6.32.54/linux-2.6.32/arch/h8300/Kconfig kernel-2.6.32.54.vs/linux-2.6.32/arch/h8300/Kconfig --- kernel-2.6.32.54/linux-2.6.32/arch/h8300/Kconfig 2009-12-03 04:51:21.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/arch/h8300/Kconfig 2012-01-16 14:51:21.701409457 +0100 @@ -226,6 +226,8 @@ source "arch/h8300/Kconfig.debug" +source "kernel/vserver/Kconfig" + source "security/Kconfig" source "crypto/Kconfig" diff -Nur kernel-2.6.32.54/linux-2.6.32/arch/ia64/ia32/ia32_entry.S kernel-2.6.32.54.vs/linux-2.6.32/arch/ia64/ia32/ia32_entry.S --- kernel-2.6.32.54/linux-2.6.32/arch/ia64/ia32/ia32_entry.S 2009-12-03 04:51:21.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/arch/ia64/ia32/ia32_entry.S 2012-01-16 14:51:21.705409443 +0100 @@ -451,7 +451,7 @@ data8 sys_tgkill /* 270 */ data8 compat_sys_utimes data8 sys32_fadvise64_64 - data8 sys_ni_syscall + data8 sys32_vserver data8 sys_ni_syscall data8 sys_ni_syscall /* 275 */ data8 sys_ni_syscall diff -Nur kernel-2.6.32.54/linux-2.6.32/arch/ia64/include/asm/tlb.h kernel-2.6.32.54.vs/linux-2.6.32/arch/ia64/include/asm/tlb.h --- kernel-2.6.32.54/linux-2.6.32/arch/ia64/include/asm/tlb.h 2009-12-03 04:51:21.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/arch/ia64/include/asm/tlb.h 2012-01-16 14:51:21.705409443 +0100 @@ -40,6 +40,7 @@ #include #include #include +#include #include #include diff -Nur kernel-2.6.32.54/linux-2.6.32/arch/ia64/Kconfig kernel-2.6.32.54.vs/linux-2.6.32/arch/ia64/Kconfig --- kernel-2.6.32.54/linux-2.6.32/arch/ia64/Kconfig 2009-12-03 04:51:21.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/arch/ia64/Kconfig 2012-01-16 14:51:21.701409457 +0100 @@ -685,6 +685,8 @@ source "arch/ia64/Kconfig.debug" +source "kernel/vserver/Kconfig" + source "security/Kconfig" source "crypto/Kconfig" diff -Nur kernel-2.6.32.54/linux-2.6.32/arch/ia64/kernel/entry.S kernel-2.6.32.54.vs/linux-2.6.32/arch/ia64/kernel/entry.S --- 
kernel-2.6.32.54/linux-2.6.32/arch/ia64/kernel/entry.S 2009-12-03 04:51:21.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/arch/ia64/kernel/entry.S 2012-01-16 14:51:21.705409443 +0100 @@ -1753,7 +1753,7 @@ data8 sys_mq_notify data8 sys_mq_getsetattr data8 sys_kexec_load - data8 sys_ni_syscall // reserved for vserver + data8 sys_vserver data8 sys_waitid // 1270 data8 sys_add_key data8 sys_request_key diff -Nur kernel-2.6.32.54/linux-2.6.32/arch/ia64/kernel/perfmon.c kernel-2.6.32.54.vs/linux-2.6.32/arch/ia64/kernel/perfmon.c --- kernel-2.6.32.54/linux-2.6.32/arch/ia64/kernel/perfmon.c 2009-12-03 04:51:21.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/arch/ia64/kernel/perfmon.c 2012-01-16 14:51:21.705409443 +0100 @@ -41,6 +41,7 @@ #include #include #include +#include #include #include @@ -2372,7 +2373,7 @@ */ insert_vm_struct(mm, vma); - mm->total_vm += size >> PAGE_SHIFT; + vx_vmpages_add(mm, size >> PAGE_SHIFT); vm_stat_account(vma->vm_mm, vma->vm_flags, vma->vm_file, vma_pages(vma)); up_write(&task->mm->mmap_sem); diff -Nur kernel-2.6.32.54/linux-2.6.32/arch/ia64/kernel/process.c kernel-2.6.32.54.vs/linux-2.6.32/arch/ia64/kernel/process.c --- kernel-2.6.32.54/linux-2.6.32/arch/ia64/kernel/process.c 2009-12-03 04:51:21.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/arch/ia64/kernel/process.c 2012-01-16 14:51:21.705409443 +0100 @@ -110,8 +110,8 @@ unsigned long ip = regs->cr_iip + ia64_psr(regs)->ri; print_modules(); - printk("\nPid: %d, CPU %d, comm: %20s\n", task_pid_nr(current), - smp_processor_id(), current->comm); + printk("\nPid: %d[#%u], CPU %d, comm: %20s\n", task_pid_nr(current), + current->xid, smp_processor_id(), current->comm); printk("psr : %016lx ifs : %016lx ip : [<%016lx>] %s (%s)\n", regs->cr_ipsr, regs->cr_ifs, ip, print_tainted(), init_utsname()->release); diff -Nur kernel-2.6.32.54/linux-2.6.32/arch/ia64/kernel/ptrace.c kernel-2.6.32.54.vs/linux-2.6.32/arch/ia64/kernel/ptrace.c --- kernel-2.6.32.54/linux-2.6.32/arch/ia64/kernel/ptrace.c 2009-12-03 04:51:21.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/arch/ia64/kernel/ptrace.c 2012-01-16 14:51:21.713409415 +0100 @@ -22,6 +22,7 @@ #include #include #include +#include #include #include diff -Nur kernel-2.6.32.54/linux-2.6.32/arch/ia64/kernel/traps.c kernel-2.6.32.54.vs/linux-2.6.32/arch/ia64/kernel/traps.c --- kernel-2.6.32.54/linux-2.6.32/arch/ia64/kernel/traps.c 2009-12-03 04:51:21.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/arch/ia64/kernel/traps.c 2012-01-16 14:51:21.713409415 +0100 @@ -60,8 +60,9 @@ put_cpu(); if (++die.lock_owner_depth < 3) { - printk("%s[%d]: %s %ld [%d]\n", - current->comm, task_pid_nr(current), str, err, ++die_counter); + printk("%s[%d[#%u]]: %s %ld [%d]\n", + current->comm, task_pid_nr(current), current->xid, + str, err, ++die_counter); if (notify_die(DIE_OOPS, str, regs, err, 255, SIGSEGV) != NOTIFY_STOP) show_regs(regs); @@ -324,8 +325,9 @@ if ((last.count & 15) < 5 && (ia64_fetchadd(1, &last.count, acq) & 15) < 5) { last.time = current_jiffies + 5 * HZ; printk(KERN_WARNING - "%s(%d): floating-point assist fault at ip %016lx, isr %016lx\n", - current->comm, task_pid_nr(current), regs->cr_iip + ia64_psr(regs)->ri, isr); + "%s(%d[#%u]): floating-point assist fault at ip %016lx, isr %016lx\n", + current->comm, task_pid_nr(current), current->xid, + regs->cr_iip + ia64_psr(regs)->ri, isr); } } } diff -Nur kernel-2.6.32.54/linux-2.6.32/arch/ia64/mm/fault.c kernel-2.6.32.54.vs/linux-2.6.32/arch/ia64/mm/fault.c --- kernel-2.6.32.54/linux-2.6.32/arch/ia64/mm/fault.c 
2009-12-03 04:51:21.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/arch/ia64/mm/fault.c 2012-01-16 14:51:21.713409415 +0100 @@ -10,6 +10,7 @@ #include #include #include +#include #include #include @@ -281,7 +282,8 @@ down_read(&mm->mmap_sem); goto survive; } - printk(KERN_CRIT "VM: killing process %s\n", current->comm); + printk(KERN_CRIT "VM: killing process %s(%d:#%u)\n", + current->comm, task_pid_nr(current), current->xid); if (user_mode(regs)) do_group_exit(SIGKILL); goto no_context; diff -Nur kernel-2.6.32.54/linux-2.6.32/arch/m32r/kernel/traps.c kernel-2.6.32.54.vs/linux-2.6.32/arch/m32r/kernel/traps.c --- kernel-2.6.32.54/linux-2.6.32/arch/m32r/kernel/traps.c 2009-12-03 04:51:21.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/arch/m32r/kernel/traps.c 2012-01-16 14:51:21.713409415 +0100 @@ -196,8 +196,9 @@ } else { printk("SPI: %08lx\n", sp); } - printk("Process %s (pid: %d, process nr: %d, stackpage=%08lx)", - current->comm, task_pid_nr(current), 0xffff & i, 4096+(unsigned long)current); + printk("Process %s (pid: %d[#%u], process nr: %d, stackpage=%08lx)", + current->comm, task_pid_nr(current), current->xid, + 0xffff & i, 4096+(unsigned long)current); /* * When in-kernel, we also print out the stack and code at the diff -Nur kernel-2.6.32.54/linux-2.6.32/arch/m32r/mm/fault.c kernel-2.6.32.54.vs/linux-2.6.32/arch/m32r/mm/fault.c --- kernel-2.6.32.54/linux-2.6.32/arch/m32r/mm/fault.c 2009-12-03 04:51:21.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/arch/m32r/mm/fault.c 2012-01-16 14:51:21.713409415 +0100 @@ -276,7 +276,8 @@ down_read(&mm->mmap_sem); goto survive; } - printk("VM: killing process %s\n", tsk->comm); + printk("VM: killing process %s(%d:#%u)\n", + tsk->comm, task_pid_nr(tsk), tsk->xid); if (error_code & ACE_USERMODE) do_group_exit(SIGKILL); goto no_context; diff -Nur kernel-2.6.32.54/linux-2.6.32/arch/m68k/Kconfig kernel-2.6.32.54.vs/linux-2.6.32/arch/m68k/Kconfig --- kernel-2.6.32.54/linux-2.6.32/arch/m68k/Kconfig 2009-12-03 04:51:21.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/arch/m68k/Kconfig 2012-01-16 14:51:21.713409415 +0100 @@ -622,6 +622,8 @@ source "arch/m68k/Kconfig.debug" +source "kernel/vserver/Kconfig" + source "security/Kconfig" source "crypto/Kconfig" diff -Nur kernel-2.6.32.54/linux-2.6.32/arch/m68k/kernel/ptrace.c kernel-2.6.32.54.vs/linux-2.6.32/arch/m68k/kernel/ptrace.c --- kernel-2.6.32.54/linux-2.6.32/arch/m68k/kernel/ptrace.c 2009-12-03 04:51:21.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/arch/m68k/kernel/ptrace.c 2012-01-16 14:51:21.713409415 +0100 @@ -18,6 +18,7 @@ #include #include #include +#include #include #include @@ -269,6 +270,8 @@ ret = ptrace_request(child, request, addr, data); break; } + if (!vx_check(vx_task_xid(child), VS_WATCH_P | VS_IDENT)) + goto out_tsk; return ret; out_eio: diff -Nur kernel-2.6.32.54/linux-2.6.32/arch/m68k/kernel/traps.c kernel-2.6.32.54.vs/linux-2.6.32/arch/m68k/kernel/traps.c --- kernel-2.6.32.54/linux-2.6.32/arch/m68k/kernel/traps.c 2009-12-03 04:51:21.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/arch/m68k/kernel/traps.c 2012-01-16 14:51:21.713409415 +0100 @@ -906,8 +906,8 @@ printk("d4: %08lx d5: %08lx a0: %08lx a1: %08lx\n", regs->d4, regs->d5, regs->a0, regs->a1); - printk("Process %s (pid: %d, task=%p)\n", - current->comm, task_pid_nr(current), current); + printk("Process %s (pid: %d[#%u], task=%p)\n", + current->comm, task_pid_nr(current), current->xid, current); addr = (unsigned long)&fp->un; printk("Frame format=%X ", regs->format); switch (regs->format) { diff 
-Nur kernel-2.6.32.54/linux-2.6.32/arch/m68k/mm/fault.c kernel-2.6.32.54.vs/linux-2.6.32/arch/m68k/mm/fault.c --- kernel-2.6.32.54/linux-2.6.32/arch/m68k/mm/fault.c 2009-12-03 04:51:21.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/arch/m68k/mm/fault.c 2012-01-16 14:51:21.713409415 +0100 @@ -186,7 +186,8 @@ goto survive; } - printk("VM: killing process %s\n", current->comm); + printk("VM: killing process %s(%d:#%u)\n", + current->comm, task_pid_nr(current), current->xid); if (user_mode(regs)) do_group_exit(SIGKILL); diff -Nur kernel-2.6.32.54/linux-2.6.32/arch/m68knommu/Kconfig kernel-2.6.32.54.vs/linux-2.6.32/arch/m68knommu/Kconfig --- kernel-2.6.32.54/linux-2.6.32/arch/m68knommu/Kconfig 2009-12-03 04:51:21.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/arch/m68knommu/Kconfig 2012-01-16 14:51:21.713409415 +0100 @@ -727,6 +727,8 @@ source "arch/m68knommu/Kconfig.debug" +source "kernel/vserver/Kconfig" + source "security/Kconfig" source "crypto/Kconfig" diff -Nur kernel-2.6.32.54/linux-2.6.32/arch/m68knommu/kernel/traps.c kernel-2.6.32.54.vs/linux-2.6.32/arch/m68knommu/kernel/traps.c --- kernel-2.6.32.54/linux-2.6.32/arch/m68knommu/kernel/traps.c 2009-12-03 04:51:21.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/arch/m68knommu/kernel/traps.c 2012-01-16 14:51:21.713409415 +0100 @@ -78,8 +78,9 @@ printk(KERN_EMERG "d4: %08lx d5: %08lx a0: %08lx a1: %08lx\n", fp->d4, fp->d5, fp->a0, fp->a1); - printk(KERN_EMERG "Process %s (pid: %d, stackpage=%08lx)\n", - current->comm, current->pid, PAGE_SIZE+(unsigned long)current); + printk(KERN_EMERG "Process %s (pid: %d[#%u], stackpage=%08lx)\n", + current->comm, task_pid_nr(current), current->xid, + PAGE_SIZE+(unsigned long)current); show_stack(NULL, (unsigned long *)(fp + 1)); add_taint(TAINT_DIE); do_exit(SIGSEGV); diff -Nur kernel-2.6.32.54/linux-2.6.32/arch/microblaze/mm/fault.c kernel-2.6.32.54.vs/linux-2.6.32/arch/microblaze/mm/fault.c --- kernel-2.6.32.54/linux-2.6.32/arch/microblaze/mm/fault.c 2009-12-03 04:51:21.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/arch/microblaze/mm/fault.c 2012-01-16 14:51:21.713409415 +0100 @@ -279,7 +279,8 @@ goto survive; } up_read(&mm->mmap_sem); - printk(KERN_WARNING "VM: killing process %s\n", current->comm); + printk(KERN_WARNING "VM: killing process %s(%d:#%u)\n", + current->comm, task_pid_nr(current), current->xid); if (user_mode(regs)) do_exit(SIGKILL); bad_page_fault(regs, address, SIGKILL); diff -Nur kernel-2.6.32.54/linux-2.6.32/arch/mips/Kconfig kernel-2.6.32.54.vs/linux-2.6.32/arch/mips/Kconfig --- kernel-2.6.32.54/linux-2.6.32/arch/mips/Kconfig 2009-12-03 04:51:21.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/arch/mips/Kconfig 2012-01-16 14:51:21.721409387 +0100 @@ -2188,6 +2188,8 @@ source "arch/mips/Kconfig.debug" +source "kernel/vserver/Kconfig" + source "security/Kconfig" source "crypto/Kconfig" diff -Nur kernel-2.6.32.54/linux-2.6.32/arch/mips/kernel/ptrace.c kernel-2.6.32.54.vs/linux-2.6.32/arch/mips/kernel/ptrace.c --- kernel-2.6.32.54/linux-2.6.32/arch/mips/kernel/ptrace.c 2009-12-03 04:51:21.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/arch/mips/kernel/ptrace.c 2012-01-16 14:51:21.721409387 +0100 @@ -25,6 +25,7 @@ #include #include #include +#include #include #include @@ -259,6 +260,9 @@ { int ret; + if (!vx_check(vx_task_xid(child), VS_WATCH_P | VS_IDENT)) + goto out; + switch (request) { /* when I and D space are separate, these will need to be fixed. */ case PTRACE_PEEKTEXT: /* read word at location addr. 
*/ diff -Nur kernel-2.6.32.54/linux-2.6.32/arch/mips/kernel/scall32-o32.S kernel-2.6.32.54.vs/linux-2.6.32/arch/mips/kernel/scall32-o32.S --- kernel-2.6.32.54/linux-2.6.32/arch/mips/kernel/scall32-o32.S 2009-12-03 04:51:21.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/arch/mips/kernel/scall32-o32.S 2012-01-16 14:51:21.721409387 +0100 @@ -525,7 +525,7 @@ sys sys_mq_timedreceive 5 sys sys_mq_notify 2 /* 4275 */ sys sys_mq_getsetattr 3 - sys sys_ni_syscall 0 /* sys_vserver */ + sys sys_vserver 3 sys sys_waitid 5 sys sys_ni_syscall 0 /* available, was setaltroot */ sys sys_add_key 5 /* 4280 */ diff -Nur kernel-2.6.32.54/linux-2.6.32/arch/mips/kernel/scall64-64.S kernel-2.6.32.54.vs/linux-2.6.32/arch/mips/kernel/scall64-64.S --- kernel-2.6.32.54/linux-2.6.32/arch/mips/kernel/scall64-64.S 2009-12-03 04:51:21.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/arch/mips/kernel/scall64-64.S 2012-01-16 14:51:21.721409387 +0100 @@ -362,7 +362,7 @@ PTR sys_mq_timedreceive PTR sys_mq_notify PTR sys_mq_getsetattr /* 5235 */ - PTR sys_ni_syscall /* sys_vserver */ + PTR sys_vserver PTR sys_waitid PTR sys_ni_syscall /* available, was setaltroot */ PTR sys_add_key diff -Nur kernel-2.6.32.54/linux-2.6.32/arch/mips/kernel/scall64-n32.S kernel-2.6.32.54.vs/linux-2.6.32/arch/mips/kernel/scall64-n32.S --- kernel-2.6.32.54/linux-2.6.32/arch/mips/kernel/scall64-n32.S 2009-12-03 04:51:21.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/arch/mips/kernel/scall64-n32.S 2012-01-16 14:51:21.721409387 +0100 @@ -360,7 +360,7 @@ PTR compat_sys_mq_timedreceive PTR compat_sys_mq_notify PTR compat_sys_mq_getsetattr - PTR sys_ni_syscall /* 6240, sys_vserver */ + PTR sys32_vserver /* 6240 */ PTR compat_sys_waitid PTR sys_ni_syscall /* available, was setaltroot */ PTR sys_add_key diff -Nur kernel-2.6.32.54/linux-2.6.32/arch/mips/kernel/scall64-o32.S kernel-2.6.32.54.vs/linux-2.6.32/arch/mips/kernel/scall64-o32.S --- kernel-2.6.32.54/linux-2.6.32/arch/mips/kernel/scall64-o32.S 2009-12-03 04:51:21.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/arch/mips/kernel/scall64-o32.S 2012-01-16 14:51:21.721409387 +0100 @@ -480,7 +480,7 @@ PTR compat_sys_mq_timedreceive PTR compat_sys_mq_notify /* 4275 */ PTR compat_sys_mq_getsetattr - PTR sys_ni_syscall /* sys_vserver */ + PTR sys32_vserver PTR sys_32_waitid PTR sys_ni_syscall /* available, was setaltroot */ PTR sys_add_key /* 4280 */ diff -Nur kernel-2.6.32.54/linux-2.6.32/arch/mips/kernel/traps.c kernel-2.6.32.54.vs/linux-2.6.32/arch/mips/kernel/traps.c --- kernel-2.6.32.54/linux-2.6.32/arch/mips/kernel/traps.c 2009-12-03 04:51:21.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/arch/mips/kernel/traps.c 2012-01-16 14:51:21.721409387 +0100 @@ -335,9 +335,10 @@ __show_regs(regs); print_modules(); - printk("Process %s (pid: %d, threadinfo=%p, task=%p, tls=%0*lx)\n", - current->comm, current->pid, current_thread_info(), current, - field, current_thread_info()->tp_value); + printk("Process %s (pid: %d:#%u, threadinfo=%p, task=%p, tls=%0*lx)\n", + current->comm, task_pid_nr(current), current->xid, + current_thread_info(), current, + field, current_thread_info()->tp_value); if (cpu_has_userlocal) { unsigned long tls; diff -Nur kernel-2.6.32.54/linux-2.6.32/arch/mn10300/mm/fault.c kernel-2.6.32.54.vs/linux-2.6.32/arch/mn10300/mm/fault.c --- kernel-2.6.32.54/linux-2.6.32/arch/mn10300/mm/fault.c 2009-12-03 04:51:21.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/arch/mn10300/mm/fault.c 2012-01-16 14:51:21.721409387 +0100 @@ -339,7 +339,8 @@ out_of_memory: 
up_read(&mm->mmap_sem); monitor_signal(regs); - printk(KERN_ALERT "VM: killing process %s\n", tsk->comm); + printk(KERN_ALERT "VM: killing process %s(%d:#%u)\n", + tsk->comm, task_pid_nr(tsk), tsk->xid); if ((fault_code & MMUFCR_xFC_ACCESS) == MMUFCR_xFC_ACCESS_USR) do_exit(SIGKILL); goto no_context; diff -Nur kernel-2.6.32.54/linux-2.6.32/arch/parisc/Kconfig kernel-2.6.32.54.vs/linux-2.6.32/arch/parisc/Kconfig --- kernel-2.6.32.54/linux-2.6.32/arch/parisc/Kconfig 2009-12-03 04:51:21.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/arch/parisc/Kconfig 2012-01-16 14:51:21.721409387 +0100 @@ -294,6 +294,8 @@ source "arch/parisc/Kconfig.debug" +source "kernel/vserver/Kconfig" + source "security/Kconfig" source "crypto/Kconfig" diff -Nur kernel-2.6.32.54/linux-2.6.32/arch/parisc/kernel/syscall_table.S kernel-2.6.32.54.vs/linux-2.6.32/arch/parisc/kernel/syscall_table.S --- kernel-2.6.32.54/linux-2.6.32/arch/parisc/kernel/syscall_table.S 2009-12-03 04:51:21.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/arch/parisc/kernel/syscall_table.S 2012-01-16 14:51:21.721409387 +0100 @@ -361,7 +361,7 @@ ENTRY_COMP(mbind) /* 260 */ ENTRY_COMP(get_mempolicy) ENTRY_COMP(set_mempolicy) - ENTRY_SAME(ni_syscall) /* 263: reserved for vserver */ + ENTRY_DIFF(vserver) ENTRY_SAME(add_key) ENTRY_SAME(request_key) /* 265 */ ENTRY_SAME(keyctl) diff -Nur kernel-2.6.32.54/linux-2.6.32/arch/parisc/kernel/traps.c kernel-2.6.32.54.vs/linux-2.6.32/arch/parisc/kernel/traps.c --- kernel-2.6.32.54/linux-2.6.32/arch/parisc/kernel/traps.c 2009-12-03 04:51:21.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/arch/parisc/kernel/traps.c 2012-01-16 14:51:21.721409387 +0100 @@ -236,8 +236,9 @@ if (err == 0) return; /* STFU */ - printk(KERN_CRIT "%s (pid %d): %s (code %ld) at " RFMT "\n", - current->comm, task_pid_nr(current), str, err, regs->iaoq[0]); + printk(KERN_CRIT "%s (pid %d:#%u): %s (code %ld) at " RFMT "\n", + current->comm, task_pid_nr(current), current->xid, + str, err, regs->iaoq[0]); #ifdef PRINT_USER_FAULTS /* XXX for debugging only */ show_regs(regs); @@ -270,8 +271,8 @@ pdc_console_restart(); if (err) - printk(KERN_CRIT "%s (pid %d): %s (code %ld)\n", - current->comm, task_pid_nr(current), str, err); + printk(KERN_CRIT "%s (pid %d:#%u): %s (code %ld)\n", + current->comm, task_pid_nr(current), current->xid, str, err); /* Wot's wrong wif bein' racy? 
*/ if (current->thread.flags & PARISC_KERNEL_DEATH) { diff -Nur kernel-2.6.32.54/linux-2.6.32/arch/parisc/mm/fault.c kernel-2.6.32.54.vs/linux-2.6.32/arch/parisc/mm/fault.c --- kernel-2.6.32.54/linux-2.6.32/arch/parisc/mm/fault.c 2009-12-03 04:51:21.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/arch/parisc/mm/fault.c 2012-01-16 14:51:21.721409387 +0100 @@ -237,8 +237,9 @@ #ifdef PRINT_USER_FAULTS printk(KERN_DEBUG "\n"); - printk(KERN_DEBUG "do_page_fault() pid=%d command='%s' type=%lu address=0x%08lx\n", - task_pid_nr(tsk), tsk->comm, code, address); + printk(KERN_DEBUG "do_page_fault() pid=%d:#%u " + "command='%s' type=%lu address=0x%08lx\n", + task_pid_nr(tsk), tsk->xid, tsk->comm, code, address); if (vma) { printk(KERN_DEBUG "vm_start = 0x%08lx, vm_end = 0x%08lx\n", vma->vm_start, vma->vm_end); @@ -264,7 +265,8 @@ out_of_memory: up_read(&mm->mmap_sem); - printk(KERN_CRIT "VM: killing process %s\n", current->comm); + printk(KERN_CRIT "VM: killing process %s(%d:#%u)\n", + current->comm, current->pid, current->xid); if (user_mode(regs)) do_group_exit(SIGKILL); goto no_context; diff -Nur kernel-2.6.32.54/linux-2.6.32/arch/powerpc/include/asm/unistd.h kernel-2.6.32.54.vs/linux-2.6.32/arch/powerpc/include/asm/unistd.h --- kernel-2.6.32.54/linux-2.6.32/arch/powerpc/include/asm/unistd.h 2009-12-03 04:51:21.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/arch/powerpc/include/asm/unistd.h 2012-01-16 14:51:21.729409359 +0100 @@ -275,7 +275,7 @@ #endif #define __NR_rtas 255 #define __NR_sys_debug_setcontext 256 -/* Number 257 is reserved for vserver */ +#define __NR_vserver 257 #define __NR_migrate_pages 258 #define __NR_mbind 259 #define __NR_get_mempolicy 260 diff -Nur kernel-2.6.32.54/linux-2.6.32/arch/powerpc/Kconfig kernel-2.6.32.54.vs/linux-2.6.32/arch/powerpc/Kconfig --- kernel-2.6.32.54/linux-2.6.32/arch/powerpc/Kconfig 2009-12-03 04:51:21.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/arch/powerpc/Kconfig 2012-01-16 14:51:21.729409359 +0100 @@ -943,6 +943,8 @@ source "arch/powerpc/Kconfig.debug" +source "kernel/vserver/Kconfig" + source "security/Kconfig" config KEYS_COMPAT diff -Nur kernel-2.6.32.54/linux-2.6.32/arch/powerpc/kernel/process.c kernel-2.6.32.54.vs/linux-2.6.32/arch/powerpc/kernel/process.c --- kernel-2.6.32.54/linux-2.6.32/arch/powerpc/kernel/process.c 2012-01-16 15:01:37.936584429 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/arch/powerpc/kernel/process.c 2012-01-16 14:51:21.733409345 +0100 @@ -519,8 +519,9 @@ #else printk("DAR: "REG", DSISR: "REG"\n", regs->dar, regs->dsisr); #endif - printk("TASK = %p[%d] '%s' THREAD: %p", - current, task_pid_nr(current), current->comm, task_thread_info(current)); + printk("TASK = %p[%d,#%u] '%s' THREAD: %p", + current, task_pid_nr(current), current->xid, + current->comm, task_thread_info(current)); #ifdef CONFIG_SMP printk(" CPU: %d", raw_smp_processor_id()); diff -Nur kernel-2.6.32.54/linux-2.6.32/arch/powerpc/kernel/traps.c kernel-2.6.32.54.vs/linux-2.6.32/arch/powerpc/kernel/traps.c --- kernel-2.6.32.54/linux-2.6.32/arch/powerpc/kernel/traps.c 2009-12-03 04:51:21.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/arch/powerpc/kernel/traps.c 2012-01-16 14:51:21.733409345 +0100 @@ -931,8 +931,9 @@ void trace_syscall(struct pt_regs *regs) { - printk("Task: %p(%d), PC: %08lX/%08lX, Syscall: %3ld, Result: %s%ld %s\n", - current, task_pid_nr(current), regs->nip, regs->link, regs->gpr[0], + printk("Task: %p(%d[#%u]), PC: %08lX/%08lX, Syscall: %3ld, Result: %s%ld %s\n", + current, task_pid_nr(current), current->xid, + 
regs->nip, regs->link, regs->gpr[0], regs->ccr&0x10000000?"Error=":"", regs->gpr[3], print_tainted()); } diff -Nur kernel-2.6.32.54/linux-2.6.32/arch/powerpc/kernel/vdso.c kernel-2.6.32.54.vs/linux-2.6.32/arch/powerpc/kernel/vdso.c --- kernel-2.6.32.54/linux-2.6.32/arch/powerpc/kernel/vdso.c 2009-12-03 04:51:21.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/arch/powerpc/kernel/vdso.c 2012-01-16 14:51:21.733409345 +0100 @@ -23,6 +23,7 @@ #include #include #include +#include #include #include diff -Nur kernel-2.6.32.54/linux-2.6.32/arch/powerpc/mm/fault.c kernel-2.6.32.54.vs/linux-2.6.32/arch/powerpc/mm/fault.c --- kernel-2.6.32.54/linux-2.6.32/arch/powerpc/mm/fault.c 2009-12-03 04:51:21.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/arch/powerpc/mm/fault.c 2012-01-16 14:51:21.733409345 +0100 @@ -358,7 +358,8 @@ down_read(&mm->mmap_sem); goto survive; } - printk("VM: killing process %s\n", current->comm); + printk("VM: killing process %s(%d:#%u)\n", + current->comm, current->pid, current->xid); if (user_mode(regs)) do_group_exit(SIGKILL); return SIGKILL; diff -Nur kernel-2.6.32.54/linux-2.6.32/arch/s390/include/asm/tlb.h kernel-2.6.32.54.vs/linux-2.6.32/arch/s390/include/asm/tlb.h --- kernel-2.6.32.54/linux-2.6.32/arch/s390/include/asm/tlb.h 2009-12-03 04:51:21.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/arch/s390/include/asm/tlb.h 2012-01-16 14:51:21.733409345 +0100 @@ -23,6 +23,8 @@ #include #include +#include + #include #include #include diff -Nur kernel-2.6.32.54/linux-2.6.32/arch/s390/include/asm/unistd.h kernel-2.6.32.54.vs/linux-2.6.32/arch/s390/include/asm/unistd.h --- kernel-2.6.32.54/linux-2.6.32/arch/s390/include/asm/unistd.h 2009-12-03 04:51:21.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/arch/s390/include/asm/unistd.h 2012-01-16 14:51:21.733409345 +0100 @@ -202,7 +202,7 @@ #define __NR_clock_gettime (__NR_timer_create+6) #define __NR_clock_getres (__NR_timer_create+7) #define __NR_clock_nanosleep (__NR_timer_create+8) -/* Number 263 is reserved for vserver */ +#define __NR_vserver 263 #define __NR_statfs64 265 #define __NR_fstatfs64 266 #define __NR_remap_file_pages 267 diff -Nur kernel-2.6.32.54/linux-2.6.32/arch/s390/Kconfig kernel-2.6.32.54.vs/linux-2.6.32/arch/s390/Kconfig --- kernel-2.6.32.54/linux-2.6.32/arch/s390/Kconfig 2009-12-03 04:51:21.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/arch/s390/Kconfig 2012-01-16 14:51:21.733409345 +0100 @@ -616,6 +616,8 @@ source "arch/s390/Kconfig.debug" +source "kernel/vserver/Kconfig" + source "security/Kconfig" source "crypto/Kconfig" diff -Nur kernel-2.6.32.54/linux-2.6.32/arch/s390/kernel/ptrace.c kernel-2.6.32.54.vs/linux-2.6.32/arch/s390/kernel/ptrace.c --- kernel-2.6.32.54/linux-2.6.32/arch/s390/kernel/ptrace.c 2012-01-16 15:01:37.964598329 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/arch/s390/kernel/ptrace.c 2012-01-16 14:51:21.733409345 +0100 @@ -36,6 +36,7 @@ #include #include #include +#include #include #include #include diff -Nur kernel-2.6.32.54/linux-2.6.32/arch/s390/kernel/syscalls.S kernel-2.6.32.54.vs/linux-2.6.32/arch/s390/kernel/syscalls.S --- kernel-2.6.32.54/linux-2.6.32/arch/s390/kernel/syscalls.S 2009-12-03 04:51:21.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/arch/s390/kernel/syscalls.S 2012-01-16 14:51:21.733409345 +0100 @@ -271,7 +271,7 @@ SYSCALL(sys_clock_gettime,sys_clock_gettime,sys32_clock_gettime_wrapper) /* 260 */ SYSCALL(sys_clock_getres,sys_clock_getres,sys32_clock_getres_wrapper) SYSCALL(sys_clock_nanosleep,sys_clock_nanosleep,sys32_clock_nanosleep_wrapper) 
-NI_SYSCALL /* reserved for vserver */ +SYSCALL(sys_vserver,sys_vserver,sys32_vserver) SYSCALL(sys_s390_fadvise64_64,sys_ni_syscall,sys32_fadvise64_64_wrapper) SYSCALL(sys_statfs64,sys_statfs64,compat_sys_statfs64_wrapper) SYSCALL(sys_fstatfs64,sys_fstatfs64,compat_sys_fstatfs64_wrapper) diff -Nur kernel-2.6.32.54/linux-2.6.32/arch/s390/lib/uaccess_pt.c kernel-2.6.32.54.vs/linux-2.6.32/arch/s390/lib/uaccess_pt.c --- kernel-2.6.32.54/linux-2.6.32/arch/s390/lib/uaccess_pt.c 2009-12-03 04:51:21.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/arch/s390/lib/uaccess_pt.c 2012-01-16 14:51:21.733409345 +0100 @@ -90,7 +90,8 @@ down_read(&mm->mmap_sem); goto survive; } - printk("VM: killing process %s\n", current->comm); + printk("VM: killing process %s(%d:#%u)\n", + current->comm, task_pid_nr(current), current->xid); return ret; out_sigbus: diff -Nur kernel-2.6.32.54/linux-2.6.32/arch/sh/Kconfig kernel-2.6.32.54.vs/linux-2.6.32/arch/sh/Kconfig --- kernel-2.6.32.54/linux-2.6.32/arch/sh/Kconfig 2009-12-03 04:51:21.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/arch/sh/Kconfig 2012-01-16 14:51:21.733409345 +0100 @@ -853,6 +853,8 @@ source "arch/sh/Kconfig.debug" +source "kernel/vserver/Kconfig" + source "security/Kconfig" source "crypto/Kconfig" diff -Nur kernel-2.6.32.54/linux-2.6.32/arch/sh/kernel/irq.c kernel-2.6.32.54.vs/linux-2.6.32/arch/sh/kernel/irq.c --- kernel-2.6.32.54/linux-2.6.32/arch/sh/kernel/irq.c 2009-12-03 04:51:21.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/arch/sh/kernel/irq.c 2012-01-16 14:51:21.741409317 +0100 @@ -12,6 +12,7 @@ #include #include #include +// #include #include #include #include diff -Nur kernel-2.6.32.54/linux-2.6.32/arch/sh/kernel/vsyscall/vsyscall.c kernel-2.6.32.54.vs/linux-2.6.32/arch/sh/kernel/vsyscall/vsyscall.c --- kernel-2.6.32.54/linux-2.6.32/arch/sh/kernel/vsyscall/vsyscall.c 2009-12-03 04:51:21.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/arch/sh/kernel/vsyscall/vsyscall.c 2012-01-16 14:51:21.741409317 +0100 @@ -19,6 +19,7 @@ #include #include #include +#include /* * Should the kernel map a VDSO page into processes and pass its diff -Nur kernel-2.6.32.54/linux-2.6.32/arch/sh/mm/fault_32.c kernel-2.6.32.54.vs/linux-2.6.32/arch/sh/mm/fault_32.c --- kernel-2.6.32.54/linux-2.6.32/arch/sh/mm/fault_32.c 2009-12-03 04:51:21.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/arch/sh/mm/fault_32.c 2012-01-16 14:51:21.741409317 +0100 @@ -292,7 +292,8 @@ down_read(&mm->mmap_sem); goto survive; } - printk("VM: killing process %s\n", tsk->comm); + printk("VM: killing process %s(%d:#%u)\n", + tsk->comm, task_pid_nr(tsk), tsk->xid); if (user_mode(regs)) do_group_exit(SIGKILL); goto no_context; diff -Nur kernel-2.6.32.54/linux-2.6.32/arch/sh/mm/tlbflush_64.c kernel-2.6.32.54.vs/linux-2.6.32/arch/sh/mm/tlbflush_64.c --- kernel-2.6.32.54/linux-2.6.32/arch/sh/mm/tlbflush_64.c 2009-12-03 04:51:21.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/arch/sh/mm/tlbflush_64.c 2012-01-16 14:51:21.741409317 +0100 @@ -306,7 +306,8 @@ down_read(&mm->mmap_sem); goto survive; } - printk("VM: killing process %s\n", tsk->comm); + printk("VM: killing process %s(%d:#%u)\n", + tsk->comm, task_pid_nr(tsk), tsk->xid); if (user_mode(regs)) do_group_exit(SIGKILL); goto no_context; diff -Nur kernel-2.6.32.54/linux-2.6.32/arch/sparc/include/asm/tlb_64.h kernel-2.6.32.54.vs/linux-2.6.32/arch/sparc/include/asm/tlb_64.h --- kernel-2.6.32.54/linux-2.6.32/arch/sparc/include/asm/tlb_64.h 2009-12-03 04:51:21.000000000 +0100 +++ 
kernel-2.6.32.54.vs/linux-2.6.32/arch/sparc/include/asm/tlb_64.h 2012-01-16 14:51:21.741409317 +0100 @@ -3,6 +3,7 @@ #include #include +#include #include #include #include diff -Nur kernel-2.6.32.54/linux-2.6.32/arch/sparc/include/asm/unistd.h kernel-2.6.32.54.vs/linux-2.6.32/arch/sparc/include/asm/unistd.h --- kernel-2.6.32.54/linux-2.6.32/arch/sparc/include/asm/unistd.h 2009-12-03 04:51:21.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/arch/sparc/include/asm/unistd.h 2012-01-16 14:51:21.741409317 +0100 @@ -335,7 +335,7 @@ #define __NR_timer_getoverrun 264 #define __NR_timer_delete 265 #define __NR_timer_create 266 -/* #define __NR_vserver 267 Reserved for VSERVER */ +#define __NR_vserver 267 #define __NR_io_setup 268 #define __NR_io_destroy 269 #define __NR_io_submit 270 diff -Nur kernel-2.6.32.54/linux-2.6.32/arch/sparc/Kconfig kernel-2.6.32.54.vs/linux-2.6.32/arch/sparc/Kconfig --- kernel-2.6.32.54/linux-2.6.32/arch/sparc/Kconfig 2009-12-03 04:51:21.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/arch/sparc/Kconfig 2012-01-16 14:51:21.741409317 +0100 @@ -550,6 +550,8 @@ source "arch/sparc/Kconfig.debug" +source "kernel/vserver/Kconfig" + source "security/Kconfig" source "crypto/Kconfig" diff -Nur kernel-2.6.32.54/linux-2.6.32/arch/sparc/kernel/systbls_32.S kernel-2.6.32.54.vs/linux-2.6.32/arch/sparc/kernel/systbls_32.S --- kernel-2.6.32.54/linux-2.6.32/arch/sparc/kernel/systbls_32.S 2012-01-16 15:01:38.008620174 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/arch/sparc/kernel/systbls_32.S 2012-01-16 14:51:21.741409317 +0100 @@ -70,7 +70,7 @@ /*250*/ .long sys_mremap, sys_sysctl, sys_getsid, sys_fdatasync, sys_nfsservctl /*255*/ .long sys_sync_file_range, sys_clock_settime, sys_clock_gettime, sys_clock_getres, sys_clock_nanosleep /*260*/ .long sys_sched_getaffinity, sys_sched_setaffinity, sys_timer_settime, sys_timer_gettime, sys_timer_getoverrun -/*265*/ .long sys_timer_delete, sys_timer_create, sys_nis_syscall, sys_io_setup, sys_io_destroy +/*265*/ .long sys_timer_delete, sys_timer_create, sys_vserver, sys_io_setup, sys_io_destroy /*270*/ .long sys_io_submit, sys_io_cancel, sys_io_getevents, sys_mq_open, sys_mq_unlink /*275*/ .long sys_mq_timedsend, sys_mq_timedreceive, sys_mq_notify, sys_mq_getsetattr, sys_waitid /*280*/ .long sys_tee, sys_add_key, sys_request_key, sys_keyctl, sys_openat diff -Nur kernel-2.6.32.54/linux-2.6.32/arch/sparc/kernel/systbls_64.S kernel-2.6.32.54.vs/linux-2.6.32/arch/sparc/kernel/systbls_64.S --- kernel-2.6.32.54/linux-2.6.32/arch/sparc/kernel/systbls_64.S 2012-01-16 15:01:38.008620174 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/arch/sparc/kernel/systbls_64.S 2012-01-16 14:51:21.741409317 +0100 @@ -71,7 +71,7 @@ /*250*/ .word sys32_mremap, sys32_sysctl, sys32_getsid, sys_fdatasync, sys32_nfsservctl .word sys32_sync_file_range, compat_sys_clock_settime, compat_sys_clock_gettime, compat_sys_clock_getres, sys32_clock_nanosleep /*260*/ .word compat_sys_sched_getaffinity, compat_sys_sched_setaffinity, sys32_timer_settime, compat_sys_timer_gettime, sys_timer_getoverrun - .word sys_timer_delete, compat_sys_timer_create, sys_ni_syscall, compat_sys_io_setup, sys_io_destroy + .word sys_timer_delete, compat_sys_timer_create, sys32_vserver, compat_sys_io_setup, sys_io_destroy /*270*/ .word sys32_io_submit, sys_io_cancel, compat_sys_io_getevents, sys32_mq_open, sys_mq_unlink .word compat_sys_mq_timedsend, compat_sys_mq_timedreceive, compat_sys_mq_notify, compat_sys_mq_getsetattr, compat_sys_waitid /*280*/ .word sys32_tee, sys_add_key, sys_request_key, sys_keyctl, 
compat_sys_openat @@ -146,7 +146,7 @@ /*250*/ .word sys_64_mremap, sys_sysctl, sys_getsid, sys_fdatasync, sys_nfsservctl .word sys_sync_file_range, sys_clock_settime, sys_clock_gettime, sys_clock_getres, sys_clock_nanosleep /*260*/ .word sys_sched_getaffinity, sys_sched_setaffinity, sys_timer_settime, sys_timer_gettime, sys_timer_getoverrun - .word sys_timer_delete, sys_timer_create, sys_ni_syscall, sys_io_setup, sys_io_destroy + .word sys_timer_delete, sys_timer_create, sys_vserver, sys_io_setup, sys_io_destroy /*270*/ .word sys_io_submit, sys_io_cancel, sys_io_getevents, sys_mq_open, sys_mq_unlink .word sys_mq_timedsend, sys_mq_timedreceive, sys_mq_notify, sys_mq_getsetattr, sys_waitid /*280*/ .word sys_tee, sys_add_key, sys_request_key, sys_keyctl, sys_openat diff -Nur kernel-2.6.32.54/linux-2.6.32/arch/um/include/asm/tlb.h kernel-2.6.32.54.vs/linux-2.6.32/arch/um/include/asm/tlb.h --- kernel-2.6.32.54/linux-2.6.32/arch/um/include/asm/tlb.h 2009-12-03 04:51:21.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/arch/um/include/asm/tlb.h 2012-01-16 14:51:21.741409317 +0100 @@ -3,6 +3,7 @@ #include #include +#include #include #include #include diff -Nur kernel-2.6.32.54/linux-2.6.32/arch/um/Kconfig.rest kernel-2.6.32.54.vs/linux-2.6.32/arch/um/Kconfig.rest --- kernel-2.6.32.54/linux-2.6.32/arch/um/Kconfig.rest 2009-12-03 04:51:21.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/arch/um/Kconfig.rest 2012-01-16 14:51:21.741409317 +0100 @@ -18,6 +18,8 @@ source "fs/Kconfig" +source "kernel/vserver/Kconfig" + source "security/Kconfig" source "crypto/Kconfig" diff -Nur kernel-2.6.32.54/linux-2.6.32/arch/x86/ia32/ia32entry.S kernel-2.6.32.54.vs/linux-2.6.32/arch/x86/ia32/ia32entry.S --- kernel-2.6.32.54/linux-2.6.32/arch/x86/ia32/ia32entry.S 2012-01-16 15:01:38.012622159 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/arch/x86/ia32/ia32entry.S 2012-01-16 14:51:21.741409317 +0100 @@ -783,7 +783,7 @@ .quad sys_tgkill /* 270 */ .quad compat_sys_utimes .quad sys32_fadvise64_64 - .quad quiet_ni_syscall /* sys_vserver */ + .quad sys32_vserver .quad sys_mbind .quad compat_sys_get_mempolicy /* 275 */ .quad sys_set_mempolicy diff -Nur kernel-2.6.32.54/linux-2.6.32/arch/x86/include/asm/unistd_64.h kernel-2.6.32.54.vs/linux-2.6.32/arch/x86/include/asm/unistd_64.h --- kernel-2.6.32.54/linux-2.6.32/arch/x86/include/asm/unistd_64.h 2009-12-03 04:51:21.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/arch/x86/include/asm/unistd_64.h 2012-01-16 14:51:21.749409289 +0100 @@ -535,7 +535,7 @@ #define __NR_utimes 235 __SYSCALL(__NR_utimes, sys_utimes) #define __NR_vserver 236 -__SYSCALL(__NR_vserver, sys_ni_syscall) +__SYSCALL(__NR_vserver, sys_vserver) #define __NR_mbind 237 __SYSCALL(__NR_mbind, sys_mbind) #define __NR_set_mempolicy 238 diff -Nur kernel-2.6.32.54/linux-2.6.32/arch/x86/Kconfig kernel-2.6.32.54.vs/linux-2.6.32/arch/x86/Kconfig --- kernel-2.6.32.54/linux-2.6.32/arch/x86/Kconfig 2012-01-16 15:01:38.008620174 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/arch/x86/Kconfig 2012-01-16 14:51:21.741409317 +0100 @@ -2100,6 +2100,8 @@ source "arch/x86/Kconfig.debug" +source "kernel/vserver/Kconfig" + source "security/Kconfig" source "crypto/Kconfig" diff -Nur kernel-2.6.32.54/linux-2.6.32/arch/x86/kernel/syscall_table_32.S kernel-2.6.32.54.vs/linux-2.6.32/arch/x86/kernel/syscall_table_32.S --- kernel-2.6.32.54/linux-2.6.32/arch/x86/kernel/syscall_table_32.S 2012-01-16 15:01:38.064647975 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/arch/x86/kernel/syscall_table_32.S 2012-01-16 14:51:21.753409275 +0100 @@ 
-272,7 +272,7 @@
 	.long sys_tgkill		/* 270 */
 	.long sys_utimes
 	.long sys_fadvise64_64
-	.long sys_ni_syscall	/* sys_vserver */
+	.long sys_vserver
 	.long sys_mbind
 	.long sys_get_mempolicy
 	.long sys_set_mempolicy
diff -Nur kernel-2.6.32.54/linux-2.6.32/arch/xtensa/mm/fault.c kernel-2.6.32.54.vs/linux-2.6.32/arch/xtensa/mm/fault.c
--- kernel-2.6.32.54/linux-2.6.32/arch/xtensa/mm/fault.c	2009-12-03 04:51:21.000000000 +0100
+++ kernel-2.6.32.54.vs/linux-2.6.32/arch/xtensa/mm/fault.c	2012-01-16 14:51:21.753409275 +0100
@@ -151,7 +151,8 @@
 		down_read(&mm->mmap_sem);
 		goto survive;
 	}
-	printk("VM: killing process %s\n", current->comm);
+	printk("VM: killing process %s(%d:#%u)\n",
+		current->comm, task_pid_nr(current), current->xid);
 	if (user_mode(regs))
 		do_group_exit(SIGKILL);
 	bad_page_fault(regs, address, SIGKILL);
diff -Nur kernel-2.6.32.54/linux-2.6.32/Documentation/scheduler/sched-cfs-hard-limits.txt kernel-2.6.32.54.vs/linux-2.6.32/Documentation/scheduler/sched-cfs-hard-limits.txt
--- kernel-2.6.32.54/linux-2.6.32/Documentation/scheduler/sched-cfs-hard-limits.txt	1970-01-01 01:00:00.000000000 +0100
+++ kernel-2.6.32.54.vs/linux-2.6.32/Documentation/scheduler/sched-cfs-hard-limits.txt	2012-01-16 14:51:21.685409513 +0100
@@ -0,0 +1,48 @@
+CPU HARD LIMITS FOR CFS GROUPS
+==============================
+
+1. Overview
+2. Interface
+3. Examples
+
+1. Overview
+-----------
+
+CFS is a proportional share scheduler which tries to divide the CPU time
+proportionately between tasks or groups of tasks (task group/cgroup) depending
+on the priority/weight of the task or the shares assigned to groups of tasks.
+In CFS, a task/task group can get more than its share of CPU if there are
+enough idle CPU cycles available in the system, due to the work-conserving
+nature of the scheduler. However, in certain scenarios (like pay-per-use),
+it is desirable not to provide extra time to a group even in the presence
+of idle CPU cycles. This is where hard limiting can be of use.
+
+Hard limits for task groups can be set by specifying how much CPU runtime a
+group can consume within a given period. If the group consumes more CPU time
+than its runtime in a given period, it gets throttled. None of the tasks of
+the throttled group gets to run until the runtime of the group is refreshed
+at the beginning of the next period. For example, a group with a runtime of
+250 ms in a period of 500 ms is capped at 50% of one CPU, no matter how idle
+the rest of the system is.
+
+2. Interface
+------------
+
+The hard limit feature adds two cgroup files for the CFS group scheduler:
+
+cfs_runtime_us: Hard limit for the group in microseconds.
+
+cfs_period_us: Time period in microseconds within which the hard limit is
+enforced.
+
+A group gets created with default values for runtime (infinite runtime, which
+means hard limits are disabled) and period (0.5s). Each group can set its own
+values for runtime and period independently of other groups in the system.
+
+3. 
Examples +----------- + +# mount -t cgroup -ocpu none /cgroups/ +# cd /cgroups +# mkdir 1 +# cd 1/ +# echo 250000 > cfs_runtime_us /* set a 250ms runtime or limit */ +# echo 500000 > cfs_period_us /* set a 500ms period */ diff -Nur kernel-2.6.32.54/linux-2.6.32/Documentation/vserver/debug.txt kernel-2.6.32.54.vs/linux-2.6.32/Documentation/vserver/debug.txt --- kernel-2.6.32.54/linux-2.6.32/Documentation/vserver/debug.txt 1970-01-01 01:00:00.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/Documentation/vserver/debug.txt 2012-01-16 14:51:21.685409513 +0100 @@ -0,0 +1,154 @@ + +debug_cvirt: + + 2 4 "vx_map_tgid: %p/%llx: %d -> %d" + "vx_rmap_tgid: %p/%llx: %d -> %d" + +debug_dlim: + + 0 1 "ALLOC (%p,#%d)%c inode (%d)" + "FREE (%p,#%d)%c inode" + 1 2 "ALLOC (%p,#%d)%c %lld bytes (%d)" + "FREE (%p,#%d)%c %lld bytes" + 2 4 "ADJUST: %lld,%lld on %ld,%ld [mult=%d]" + 3 8 "ext3_has_free_blocks(%p): %lu<%lu+1, %c, %u!=%u r=%d" + "ext3_has_free_blocks(%p): free=%lu, root=%lu" + "rcu_free_dl_info(%p)" + 4 10 "alloc_dl_info(%p,%d) = %p" + "dealloc_dl_info(%p)" + "get_dl_info(%p[#%d.%d])" + "put_dl_info(%p[#%d.%d])" + 5 20 "alloc_dl_info(%p,%d)*" + 6 40 "__hash_dl_info: %p[#%d]" + "__unhash_dl_info: %p[#%d]" + 7 80 "locate_dl_info(%p,#%d) = %p" + +debug_misc: + + 0 1 "destroy_dqhash: %p [#0x%08x] c=%d" + "new_dqhash: %p [#0x%08x]" + "vroot[%d]_clr_dev: dev=%p[%lu,%d:%d]" + "vroot[%d]_get_real_bdev: dev=%p[%lu,%d:%d]" + "vroot[%d]_set_dev: dev=%p[%lu,%d:%d]" + "vroot_get_real_bdev not set" + 1 2 "cow_break_link(»%s«)" + "temp copy »%s«" + 2 4 "dentry_open(new): %p" + "dentry_open(old): %p" + "lookup_create(new): %p" + "old path »%s«" + "path_lookup(old): %d" + "vfs_create(new): %d" + "vfs_rename: %d" + "vfs_sendfile: %d" + 3 8 "fput(new_file=%p[#%d])" + "fput(old_file=%p[#%d])" + 4 10 "vx_info_kill(%p[#%d],%d,%d) = %d" + "vx_info_kill(%p[#%d],%d,%d)*" + 5 20 "vs_reboot(%p[#%d],%d)" + 6 40 "dropping task %p[#%u,%u] for %p[#%u,%u]" + +debug_net: + + 2 4 "nx_addr_conflict(%p,%p) %d.%d,%d.%d" + 3 8 "inet_bind(%p) %d.%d.%d.%d, %d.%d.%d.%d, %d.%d.%d.%d" + "inet_bind(%p)* %p,%p;%lx %d.%d.%d.%d" + 4 10 "ip_route_connect(%p) %p,%p;%lx" + 5 20 "__addr_in_socket(%p,%d.%d.%d.%d) %p:%d.%d.%d.%d %p;%lx" + 6 40 "sk,egf: %p [#%d] (from %d)" + "sk,egn: %p [#%d] (from %d)" + "sk,req: %p [#%d] (from %d)" + "sk: %p [#%d] (from %d)" + "tw: %p [#%d] (from %d)" + 7 80 "__sock_recvmsg: %p[%p,%p,%p;%d]:%d/%d" + "__sock_sendmsg: %p[%p,%p,%p;%d]:%d/%d" + +debug_nid: + + 0 1 "__lookup_nx_info(#%u): %p[#%u]" + "alloc_nx_info(%d) = %p" + "create_nx_info(%d) (dynamic rejected)" + "create_nx_info(%d) = %p (already there)" + "create_nx_info(%d) = %p (new)" + "dealloc_nx_info(%p)" + 1 2 "alloc_nx_info(%d)*" + "create_nx_info(%d)*" + 2 4 "get_nx_info(%p[#%d.%d])" + "put_nx_info(%p[#%d.%d])" + 3 8 "claim_nx_info(%p[#%d.%d.%d]) %p" + "clr_nx_info(%p[#%d.%d])" + "init_nx_info(%p[#%d.%d])" + "release_nx_info(%p[#%d.%d.%d]) %p" + "set_nx_info(%p[#%d.%d])" + 4 10 "__hash_nx_info: %p[#%d]" + "__nx_dynamic_id: [#%d]" + "__unhash_nx_info: %p[#%d.%d.%d]" + 5 20 "moved task %p into nxi:%p[#%d]" + "nx_migrate_task(%p,%p[#%d.%d.%d])" + "task_get_nx_info(%p)" + 6 40 "nx_clear_persistent(%p[#%d])" + +debug_quota: + + 0 1 "quota_sync_dqh(%p,%d) discard inode %p" + 1 2 "quota_sync_dqh(%p,%d)" + "sync_dquots(%p,%d)" + "sync_dquots_dqh(%p,%d)" + 3 8 "do_quotactl(%p,%d,cmd=%d,id=%d,%p)" + +debug_switch: + + 0 1 "vc: VCMD_%02d_%d[%d], %d,%p [%d,%d,%x,%x]" + 1 2 "vc: VCMD_%02d_%d[%d] = %08lx(%ld) [%d,%d]" + 4 10 "%s: (%s %s) returned %s with %d" + 
+debug_tag: + + 7 80 "dx_parse_tag(»%s«): %d:#%d" + "dx_propagate_tag(%p[#%lu.%d]): %d,%d" + +debug_xid: + + 0 1 "__lookup_vx_info(#%u): %p[#%u]" + "alloc_vx_info(%d) = %p" + "alloc_vx_info(%d)*" + "create_vx_info(%d) (dynamic rejected)" + "create_vx_info(%d) = %p (already there)" + "create_vx_info(%d) = %p (new)" + "dealloc_vx_info(%p)" + "loc_vx_info(%d) = %p (found)" + "loc_vx_info(%d) = %p (new)" + "loc_vx_info(%d) = %p (not available)" + 1 2 "create_vx_info(%d)*" + "loc_vx_info(%d)*" + 2 4 "get_vx_info(%p[#%d.%d])" + "put_vx_info(%p[#%d.%d])" + 3 8 "claim_vx_info(%p[#%d.%d.%d]) %p" + "clr_vx_info(%p[#%d.%d])" + "init_vx_info(%p[#%d.%d])" + "release_vx_info(%p[#%d.%d.%d]) %p" + "set_vx_info(%p[#%d.%d])" + 4 10 "__hash_vx_info: %p[#%d]" + "__unhash_vx_info: %p[#%d.%d.%d]" + "__vx_dynamic_id: [#%d]" + 5 20 "enter_vx_info(%p[#%d],%p) %p[#%d,%p]" + "leave_vx_info(%p[#%d,%p]) %p[#%d,%p]" + "moved task %p into vxi:%p[#%d]" + "task_get_vx_info(%p)" + "vx_migrate_task(%p,%p[#%d.%d])" + 6 40 "vx_clear_persistent(%p[#%d])" + "vx_exit_init(%p[#%d],%p[#%d,%d,%d])" + "vx_set_init(%p[#%d],%p[#%d,%d,%d])" + "vx_set_persistent(%p[#%d])" + "vx_set_reaper(%p[#%d],%p[#%d,%d])" + 7 80 "vx_child_reaper(%p[#%u,%u]) = %p[#%u,%u]" + + +debug_limit: + + n 2^n "vx_acc_cres[%5d,%s,%2d]: %5d%s" + "vx_cres_avail[%5d,%s,%2d]: %5ld > %5d + %5d" + + m 2^m "vx_acc_page[%5d,%s,%2d]: %5d%s" + "vx_acc_pages[%5d,%s,%2d]: %5d += %5d" + "vx_pages_avail[%5d,%s,%2d]: %5ld > %5d + %5d" diff -Nur kernel-2.6.32.54/linux-2.6.32/drivers/block/Kconfig kernel-2.6.32.54.vs/linux-2.6.32/drivers/block/Kconfig --- kernel-2.6.32.54/linux-2.6.32/drivers/block/Kconfig 2009-12-03 04:51:21.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/drivers/block/Kconfig 2012-01-16 14:51:21.753409275 +0100 @@ -271,6 +271,13 @@ instead, which can be configured to be on-disk compatible with the cryptoloop device. +config BLK_DEV_VROOT + tristate "Virtual Root device support" + depends on QUOTACTL + ---help--- + Saying Y here will allow you to use quota/fs ioctls on a shared + partition within a virtual server without compromising security. + config BLK_DEV_NBD tristate "Network block device support" depends on NET diff -Nur kernel-2.6.32.54/linux-2.6.32/drivers/block/loop.c kernel-2.6.32.54.vs/linux-2.6.32/drivers/block/loop.c --- kernel-2.6.32.54/linux-2.6.32/drivers/block/loop.c 2012-01-16 15:01:38.136683721 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/drivers/block/loop.c 2012-01-16 14:51:21.753409275 +0100 @@ -74,6 +74,7 @@ #include #include #include +#include #include @@ -814,6 +815,7 @@ lo->lo_blocksize = lo_blocksize; lo->lo_device = bdev; lo->lo_flags = lo_flags; + lo->lo_xid = vx_current_xid(); lo->lo_backing_file = file; lo->transfer = transfer_none; lo->ioctl = NULL; @@ -939,6 +941,7 @@ lo->lo_encrypt_key_size = 0; lo->lo_flags = 0; lo->lo_thread = NULL; + lo->lo_xid = 0; memset(lo->lo_encrypt_key, 0, LO_KEY_SIZE); memset(lo->lo_crypt_name, 0, LO_NAME_SIZE); memset(lo->lo_file_name, 0, LO_NAME_SIZE); @@ -973,7 +976,7 @@ if (lo->lo_encrypt_key_size && lo->lo_key_owner != uid && - !capable(CAP_SYS_ADMIN)) + !vx_capable(CAP_SYS_ADMIN, VXC_ADMIN_CLOOP)) return -EPERM; if (lo->lo_state != Lo_bound) return -ENXIO; @@ -1057,7 +1060,8 @@ memcpy(info->lo_crypt_name, lo->lo_crypt_name, LO_NAME_SIZE); info->lo_encrypt_type = lo->lo_encryption ? 
lo->lo_encryption->number : 0; - if (lo->lo_encrypt_key_size && capable(CAP_SYS_ADMIN)) { + if (lo->lo_encrypt_key_size && + vx_capable(CAP_SYS_ADMIN, VXC_ADMIN_CLOOP)) { info->lo_encrypt_key_size = lo->lo_encrypt_key_size; memcpy(info->lo_encrypt_key, lo->lo_encrypt_key, lo->lo_encrypt_key_size); @@ -1401,6 +1405,9 @@ { struct loop_device *lo = bdev->bd_disk->private_data; + if (!vx_check(lo->lo_xid, VS_IDENT|VS_HOSTID|VS_ADMIN_P)) + return -EACCES; + mutex_lock(&lo->lo_ctl_mutex); lo->lo_refcnt++; mutex_unlock(&lo->lo_ctl_mutex); diff -Nur kernel-2.6.32.54/linux-2.6.32/drivers/block/Makefile kernel-2.6.32.54.vs/linux-2.6.32/drivers/block/Makefile --- kernel-2.6.32.54/linux-2.6.32/drivers/block/Makefile 2009-12-03 04:51:21.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/drivers/block/Makefile 2012-01-16 14:51:21.753409275 +0100 @@ -34,6 +34,7 @@ obj-$(CONFIG_BLK_DEV_SX8) += sx8.o obj-$(CONFIG_BLK_DEV_UB) += ub.o obj-$(CONFIG_BLK_DEV_HD) += hd.o +obj-$(CONFIG_BLK_DEV_VROOT) += vroot.o obj-$(CONFIG_XEN_BLKDEV_FRONTEND) += xen-blkfront.o diff -Nur kernel-2.6.32.54/linux-2.6.32/drivers/block/vroot.c kernel-2.6.32.54.vs/linux-2.6.32/drivers/block/vroot.c --- kernel-2.6.32.54/linux-2.6.32/drivers/block/vroot.c 1970-01-01 01:00:00.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/drivers/block/vroot.c 2012-01-16 14:51:21.753409275 +0100 @@ -0,0 +1,281 @@ +/* + * linux/drivers/block/vroot.c + * + * written by Herbert Pötzl, 9/11/2002 + * ported to 2.6.10 by Herbert Pötzl, 30/12/2004 + * + * based on the loop.c code by Theodore Ts'o. + * + * Copyright (C) 2002-2007 by Herbert Pötzl. + * Redistribution of this file is permitted under the + * GNU General Public License. + * + */ + +#include +#include +#include +#include +#include + +#include +#include + + +static int max_vroot = 8; + +static struct vroot_device *vroot_dev; +static struct gendisk **disks; + + +static int vroot_set_dev( + struct vroot_device *vr, + struct block_device *bdev, + unsigned int arg) +{ + struct block_device *real_bdev; + struct file *file; + struct inode *inode; + int error; + + error = -EBUSY; + if (vr->vr_state != Vr_unbound) + goto out; + + error = -EBADF; + file = fget(arg); + if (!file) + goto out; + + error = -EINVAL; + inode = file->f_dentry->d_inode; + + + if (S_ISBLK(inode->i_mode)) { + real_bdev = inode->i_bdev; + vr->vr_device = real_bdev; + __iget(real_bdev->bd_inode); + } else + goto out_fput; + + vxdprintk(VXD_CBIT(misc, 0), + "vroot[%d]_set_dev: dev=" VXF_DEV, + vr->vr_number, VXD_DEV(real_bdev)); + + vr->vr_state = Vr_bound; + error = 0; + + out_fput: + fput(file); + out: + return error; +} + +static int vroot_clr_dev( + struct vroot_device *vr, + struct block_device *bdev) +{ + struct block_device *real_bdev; + + if (vr->vr_state != Vr_bound) + return -ENXIO; + if (vr->vr_refcnt > 1) /* we needed one fd for the ioctl */ + return -EBUSY; + + real_bdev = vr->vr_device; + + vxdprintk(VXD_CBIT(misc, 0), + "vroot[%d]_clr_dev: dev=" VXF_DEV, + vr->vr_number, VXD_DEV(real_bdev)); + + bdput(real_bdev); + vr->vr_state = Vr_unbound; + vr->vr_device = NULL; + return 0; +} + + +static int vr_ioctl(struct block_device *bdev, fmode_t mode, + unsigned int cmd, unsigned long arg) +{ + struct vroot_device *vr = bdev->bd_disk->private_data; + int err; + + down(&vr->vr_ctl_mutex); + switch (cmd) { + case VROOT_SET_DEV: + err = vroot_set_dev(vr, bdev, arg); + break; + case VROOT_CLR_DEV: + err = vroot_clr_dev(vr, bdev); + break; + default: + err = -EINVAL; + break; + } + up(&vr->vr_ctl_mutex); + return err; +} + 
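+/*
+ * Usage sketch (hypothetical, not part of the driver): from the host
+ * context, a vroot device is bound to its real block device by passing
+ * an open file descriptor as the ioctl argument; vroot_set_dev() above
+ * resolves it via fget() and accepts only block device inodes. The
+ * device node /dev/vroot0 and the backing device /dev/sda1 below are
+ * assumptions for illustration:
+ *
+ *	int vfd = open("/dev/vroot0", O_RDONLY);
+ *	int rfd = open("/dev/sda1", O_RDONLY);
+ *	if (ioctl(vfd, VROOT_SET_DEV, rfd) < 0)
+ *		perror("VROOT_SET_DEV");
+ *	close(rfd);	(the extra fd is only needed for the ioctl)
+ *
+ * VROOT_CLR_DEV unbinds again and fails with -EBUSY while more than
+ * one opener holds the vroot device, see vroot_clr_dev() above.
+ */
+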
+static int vr_open(struct block_device *bdev, fmode_t mode) +{ + struct vroot_device *vr = bdev->bd_disk->private_data; + + down(&vr->vr_ctl_mutex); + vr->vr_refcnt++; + up(&vr->vr_ctl_mutex); + return 0; +} + +static int vr_release(struct gendisk *disk, fmode_t mode) +{ + struct vroot_device *vr = disk->private_data; + + down(&vr->vr_ctl_mutex); + --vr->vr_refcnt; + up(&vr->vr_ctl_mutex); + return 0; +} + +static struct block_device_operations vr_fops = { + .owner = THIS_MODULE, + .open = vr_open, + .release = vr_release, + .ioctl = vr_ioctl, +}; + +struct block_device *__vroot_get_real_bdev(struct block_device *bdev) +{ + struct inode *inode = bdev->bd_inode; + struct vroot_device *vr; + struct block_device *real_bdev; + int minor = iminor(inode); + + vr = &vroot_dev[minor]; + real_bdev = vr->vr_device; + + vxdprintk(VXD_CBIT(misc, 0), + "vroot[%d]_get_real_bdev: dev=" VXF_DEV, + vr->vr_number, VXD_DEV(real_bdev)); + + if (vr->vr_state != Vr_bound) + return ERR_PTR(-ENXIO); + + __iget(real_bdev->bd_inode); + return real_bdev; +} + +/* + * And now the modules code and kernel interface. + */ + +module_param(max_vroot, int, 0); + +MODULE_PARM_DESC(max_vroot, "Maximum number of vroot devices (1-256)"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS_BLOCKDEV_MAJOR(VROOT_MAJOR); + +MODULE_AUTHOR ("Herbert Pötzl"); +MODULE_DESCRIPTION ("Virtual Root Device Mapper"); + + +int __init vroot_init(void) +{ + int err, i; + + if (max_vroot < 1 || max_vroot > 256) { + max_vroot = MAX_VROOT_DEFAULT; + printk(KERN_WARNING "vroot: invalid max_vroot " + "(must be between 1 and 256), " + "using default (%d)\n", max_vroot); + } + + if (register_blkdev(VROOT_MAJOR, "vroot")) + return -EIO; + + err = -ENOMEM; + vroot_dev = kmalloc(max_vroot * sizeof(struct vroot_device), GFP_KERNEL); + if (!vroot_dev) + goto out_mem1; + memset(vroot_dev, 0, max_vroot * sizeof(struct vroot_device)); + + disks = kmalloc(max_vroot * sizeof(struct gendisk *), GFP_KERNEL); + if (!disks) + goto out_mem2; + + for (i = 0; i < max_vroot; i++) { + disks[i] = alloc_disk(1); + if (!disks[i]) + goto out_mem3; + disks[i]->queue = blk_alloc_queue(GFP_KERNEL); + if (!disks[i]->queue) + goto out_mem3; + } + + for (i = 0; i < max_vroot; i++) { + struct vroot_device *vr = &vroot_dev[i]; + struct gendisk *disk = disks[i]; + + memset(vr, 0, sizeof(*vr)); + init_MUTEX(&vr->vr_ctl_mutex); + vr->vr_number = i; + disk->major = VROOT_MAJOR; + disk->first_minor = i; + disk->fops = &vr_fops; + sprintf(disk->disk_name, "vroot%d", i); + disk->private_data = vr; + } + + err = register_vroot_grb(&__vroot_get_real_bdev); + if (err) + goto out_mem3; + + for (i = 0; i < max_vroot; i++) + add_disk(disks[i]); + printk(KERN_INFO "vroot: loaded (max %d devices)\n", max_vroot); + return 0; + +out_mem3: + while (i--) + put_disk(disks[i]); + kfree(disks); +out_mem2: + kfree(vroot_dev); +out_mem1: + unregister_blkdev(VROOT_MAJOR, "vroot"); + printk(KERN_ERR "vroot: ran out of memory\n"); + return err; +} + +void vroot_exit(void) +{ + int i; + + if (unregister_vroot_grb(&__vroot_get_real_bdev)) + printk(KERN_WARNING "vroot: cannot unregister grb\n"); + + for (i = 0; i < max_vroot; i++) { + del_gendisk(disks[i]); + put_disk(disks[i]); + } + unregister_blkdev(VROOT_MAJOR, "vroot"); + + kfree(disks); + kfree(vroot_dev); +} + +module_init(vroot_init); +module_exit(vroot_exit); + +#ifndef MODULE + +static int __init max_vroot_setup(char *str) +{ + max_vroot = simple_strtol(str, NULL, 0); + return 1; +} + +__setup("max_vroot=", max_vroot_setup); + +#endif + diff -Nur 
kernel-2.6.32.54/linux-2.6.32/drivers/char/sysrq.c kernel-2.6.32.54.vs/linux-2.6.32/drivers/char/sysrq.c --- kernel-2.6.32.54/linux-2.6.32/drivers/char/sysrq.c 2009-12-03 04:51:21.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/drivers/char/sysrq.c 2012-01-16 14:51:21.753409275 +0100 @@ -38,6 +38,7 @@ #include #include #include +#include #include #include @@ -391,6 +392,21 @@ .enable_mask = SYSRQ_ENABLE_RTNICE, }; + +#ifdef CONFIG_VSERVER_DEBUG +static void sysrq_handle_vxinfo(int key, struct tty_struct *tty) +{ + dump_vx_info_inactive((key == 'x')?0:1); +} + +static struct sysrq_key_op sysrq_showvxinfo_op = { + .handler = sysrq_handle_vxinfo, + .help_msg = "conteXt", + .action_msg = "Show Context Info", + .enable_mask = SYSRQ_ENABLE_DUMP, +}; +#endif + /* Key Operations table and lock */ static DEFINE_SPINLOCK(sysrq_key_table_lock); @@ -445,7 +461,11 @@ NULL, /* v */ &sysrq_showstate_blocked_op, /* w */ /* x: May be registered on ppc/powerpc for xmon */ +#ifdef CONFIG_VSERVER_DEBUG + &sysrq_showvxinfo_op, /* x */ +#else NULL, /* x */ +#endif /* y: May be registered on sparc64 for global register dump */ NULL, /* y */ &sysrq_ftrace_dump_op, /* z */ @@ -460,6 +480,8 @@ retval = key - '0'; else if ((key >= 'a') && (key <= 'z')) retval = key + 10 - 'a'; + else if ((key >= 'A') && (key <= 'Z')) + retval = key + 10 - 'A'; else retval = -1; return retval; diff -Nur kernel-2.6.32.54/linux-2.6.32/drivers/char/tty_io.c kernel-2.6.32.54.vs/linux-2.6.32/drivers/char/tty_io.c --- kernel-2.6.32.54/linux-2.6.32/drivers/char/tty_io.c 2012-01-16 15:01:38.152691664 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/drivers/char/tty_io.c 2012-01-16 14:51:21.753409275 +0100 @@ -107,6 +107,7 @@ #include #include +#include #undef TTY_DEBUG_HANGUP @@ -1971,7 +1972,8 @@ char ch, mbz = 0; struct tty_ldisc *ld; - if ((current->signal->tty != tty) && !capable(CAP_SYS_ADMIN)) + if (((current->signal->tty != tty) && + !vx_capable(CAP_SYS_ADMIN, VXC_TIOCSTI))) return -EPERM; if (get_user(ch, p)) return -EFAULT; @@ -2259,6 +2261,7 @@ return -ENOTTY; if (get_user(pgrp_nr, p)) return -EFAULT; + pgrp_nr = vx_rmap_pid(pgrp_nr); if (pgrp_nr < 0) return -EINVAL; rcu_read_lock(); diff -Nur kernel-2.6.32.54/linux-2.6.32/drivers/infiniband/hw/ipath/ipath_user_pages.c kernel-2.6.32.54.vs/linux-2.6.32/drivers/infiniband/hw/ipath/ipath_user_pages.c --- kernel-2.6.32.54/linux-2.6.32/drivers/infiniband/hw/ipath/ipath_user_pages.c 2009-12-03 04:51:21.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/drivers/infiniband/hw/ipath/ipath_user_pages.c 2012-01-16 14:51:21.753409275 +0100 @@ -34,6 +34,7 @@ #include #include #include +#include #include "ipath_kernel.h" @@ -62,7 +63,8 @@ lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur >> PAGE_SHIFT; - if (num_pages > lock_limit) { + if (num_pages > lock_limit || + !vx_vmlocked_avail(current->mm, num_pages)) { ret = -ENOMEM; goto bail; } @@ -79,7 +81,7 @@ goto bail_release; } - current->mm->locked_vm += num_pages; + vx_vmlocked_add(current->mm, num_pages); ret = 0; goto bail; @@ -178,7 +180,7 @@ __ipath_release_user_pages(p, num_pages, 1); - current->mm->locked_vm -= num_pages; + vx_vmlocked_sub(current->mm, num_pages); up_write(¤t->mm->mmap_sem); } @@ -195,7 +197,7 @@ container_of(_work, struct ipath_user_pages_work, work); down_write(&work->mm->mmap_sem); - work->mm->locked_vm -= work->num_pages; + vx_vmlocked_sub(work->mm, work->num_pages); up_write(&work->mm->mmap_sem); mmput(work->mm); kfree(work); diff -Nur kernel-2.6.32.54/linux-2.6.32/drivers/md/dm.c 
kernel-2.6.32.54.vs/linux-2.6.32/drivers/md/dm.c --- kernel-2.6.32.54/linux-2.6.32/drivers/md/dm.c 2012-01-16 15:01:38.652729894 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/drivers/md/dm.c 2012-01-16 14:51:21.761409247 +0100 @@ -19,6 +19,7 @@ #include #include #include +#include #include @@ -121,6 +122,7 @@ rwlock_t map_lock; atomic_t holders; atomic_t open_count; + xid_t xid; unsigned long flags; @@ -331,6 +333,7 @@ static int dm_blk_open(struct block_device *bdev, fmode_t mode) { struct mapped_device *md; + int ret = -ENXIO; spin_lock(&_minor_lock); @@ -339,18 +342,19 @@ goto out; if (test_bit(DMF_FREEING, &md->flags) || - test_bit(DMF_DELETING, &md->flags)) { - md = NULL; + test_bit(DMF_DELETING, &md->flags)) + goto out; + + ret = -EACCES; + if (!vx_check(md->xid, VS_IDENT|VS_HOSTID)) goto out; - } dm_get(md); atomic_inc(&md->open_count); - + ret = 0; out: spin_unlock(&_minor_lock); - - return md ? 0 : -ENXIO; + return ret; } static int dm_blk_close(struct gendisk *disk, fmode_t mode) @@ -561,6 +565,14 @@ return 0; } +/* + * Get the xid associated with a dm device + */ +xid_t dm_get_xid(struct mapped_device *md) +{ + return md->xid; +} + /*----------------------------------------------------------------- * CRUD START: * A more elegant soln is in the works that uses the queue @@ -1781,6 +1793,7 @@ INIT_LIST_HEAD(&md->uevent_list); spin_lock_init(&md->uevent_lock); + md->xid = vx_current_xid(); md->queue = blk_init_queue(dm_request_fn, NULL); if (!md->queue) goto bad_queue; diff -Nur kernel-2.6.32.54/linux-2.6.32/drivers/md/dm.h kernel-2.6.32.54.vs/linux-2.6.32/drivers/md/dm.h --- kernel-2.6.32.54/linux-2.6.32/drivers/md/dm.h 2009-12-03 04:51:21.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/drivers/md/dm.h 2012-01-16 14:51:21.761409247 +0100 @@ -41,6 +41,8 @@ struct dm_table; struct dm_md_mempools; +xid_t dm_get_xid(struct mapped_device *md); + /*----------------------------------------------------------------- * Internal table functions. 
*---------------------------------------------------------------*/ diff -Nur kernel-2.6.32.54/linux-2.6.32/drivers/md/dm-ioctl.c kernel-2.6.32.54.vs/linux-2.6.32/drivers/md/dm-ioctl.c --- kernel-2.6.32.54/linux-2.6.32/drivers/md/dm-ioctl.c 2012-01-16 15:01:38.644729922 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/drivers/md/dm-ioctl.c 2012-01-16 14:51:21.761409247 +0100 @@ -16,6 +16,7 @@ #include #include #include +#include #include @@ -106,7 +107,8 @@ unsigned int h = hash_str(str); list_for_each_entry (hc, _name_buckets + h, name_list) - if (!strcmp(hc->name, str)) { + if (vx_check(dm_get_xid(hc->md), VS_WATCH_P | VS_IDENT) && + !strcmp(hc->name, str)) { dm_get(hc->md); return hc; } @@ -120,7 +122,8 @@ unsigned int h = hash_str(str); list_for_each_entry (hc, _uuid_buckets + h, uuid_list) - if (!strcmp(hc->uuid, str)) { + if (vx_check(dm_get_xid(hc->md), VS_WATCH_P | VS_IDENT) && + !strcmp(hc->uuid, str)) { dm_get(hc->md); return hc; } @@ -369,6 +372,9 @@ static int remove_all(struct dm_ioctl *param, size_t param_size) { + if (!vx_check(0, VS_ADMIN)) + return -EPERM; + dm_hash_remove_all(1); param->data_size = 0; return 0; @@ -416,6 +422,8 @@ */ for (i = 0; i < NUM_BUCKETS; i++) { list_for_each_entry (hc, _name_buckets + i, name_list) { + if (!vx_check(dm_get_xid(hc->md), VS_WATCH_P | VS_IDENT)) + continue; needed += sizeof(struct dm_name_list); needed += strlen(hc->name) + 1; needed += ALIGN_MASK; @@ -439,6 +447,8 @@ */ for (i = 0; i < NUM_BUCKETS; i++) { list_for_each_entry (hc, _name_buckets + i, name_list) { + if (!vx_check(dm_get_xid(hc->md), VS_WATCH_P | VS_IDENT)) + continue; if (old_nl) old_nl->next = (uint32_t) ((void *) nl - (void *) old_nl); @@ -629,10 +639,11 @@ if (!md) goto out; - mdptr = dm_get_mdptr(md); + if (vx_check(dm_get_xid(md), VS_WATCH_P | VS_IDENT)) + mdptr = dm_get_mdptr(md); + if (!mdptr) dm_put(md); - out: return mdptr; } @@ -1462,8 +1473,8 @@ ioctl_fn fn = NULL; size_t param_size; - /* only root can play with this */ - if (!capable(CAP_SYS_ADMIN)) + /* only root and certain contexts can play with this */ + if (!vx_capable(CAP_SYS_ADMIN, VXC_ADMIN_MAPPER)) return -EACCES; if (_IOC_TYPE(command) != DM_IOCTL) diff -Nur kernel-2.6.32.54/linux-2.6.32/drivers/net/tun.c kernel-2.6.32.54.vs/linux-2.6.32/drivers/net/tun.c --- kernel-2.6.32.54/linux-2.6.32/drivers/net/tun.c 2012-01-16 15:01:38.944728860 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/drivers/net/tun.c 2012-01-16 14:51:21.765409233 +0100 @@ -61,6 +61,7 @@ #include #include #include +#include #include #include #include @@ -102,6 +103,7 @@ unsigned int flags; uid_t owner; gid_t group; + nid_t nid; struct net_device *dev; struct fasync_struct *fasync; @@ -816,6 +818,7 @@ tun->owner = -1; tun->group = -1; + tun->nid = current->nid; dev->ethtool_ops = &tun_ethtool_ops; dev->destructor = tun_free_netdev; @@ -932,7 +935,7 @@ if (((tun->owner != -1 && cred->euid != tun->owner) || (tun->group != -1 && !in_egroup_p(tun->group))) && - !capable(CAP_NET_ADMIN)) + !cap_raised(current_cap(), CAP_NET_ADMIN)) return -EPERM; err = security_tun_dev_attach(tun->socket.sk); if (err < 0) @@ -946,7 +949,7 @@ char *name; unsigned long flags = 0; - if (!capable(CAP_NET_ADMIN)) + if (!nx_capable(CAP_NET_ADMIN, NXC_TUN_CREATE)) return -EPERM; err = security_tun_dev_create(); if (err < 0) @@ -1014,6 +1017,9 @@ sk->sk_destruct = tun_sock_destruct; + if (!nx_check(tun->nid, VS_IDENT | VS_HOSTID | VS_ADMIN_P)) + return -EPERM; + err = tun_attach(tun, file); if (err < 0) goto failed; @@ -1203,6 +1209,16 @@ DBG(KERN_INFO "%s: group set to 
%d\n", tun->dev->name, tun->group); break; + case TUNSETNID: + if (!capable(CAP_CONTEXT)) + return -EPERM; + + /* Set nid owner of the device */ + tun->nid = (nid_t) arg; + + DBG(KERN_INFO "%s: nid owner set to %u\n", tun->dev->name, tun->nid); + break; + case TUNSETLINK: /* Only allow setting the type when the interface is down */ if (tun->dev->flags & IFF_UP) { diff -Nur kernel-2.6.32.54/linux-2.6.32/fs/attr.c kernel-2.6.32.54.vs/linux-2.6.32/fs/attr.c --- kernel-2.6.32.54/linux-2.6.32/fs/attr.c 2009-12-03 04:51:21.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/fs/attr.c 2012-01-16 14:51:21.765409233 +0100 @@ -14,6 +14,9 @@ #include #include #include +#include +#include +#include /* Taken over from the old code... */ @@ -55,6 +58,10 @@ if (!is_owner_or_cap(inode)) goto error; } + + if (dx_permission(inode, MAY_WRITE)) + goto error; + fine: retval = 0; error: @@ -120,6 +127,8 @@ inode->i_uid = attr->ia_uid; if (ia_valid & ATTR_GID) inode->i_gid = attr->ia_gid; + if ((ia_valid & ATTR_TAG) && IS_TAGGED(inode)) + inode->i_tag = attr->ia_tag; if (ia_valid & ATTR_ATIME) inode->i_atime = timespec_trunc(attr->ia_atime, inode->i_sb->s_time_gran); @@ -214,7 +223,8 @@ error = inode_change_ok(inode, attr); if (!error) { if ((ia_valid & ATTR_UID && attr->ia_uid != inode->i_uid) || - (ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid)) + (ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid) || + (ia_valid & ATTR_TAG && attr->ia_tag != inode->i_tag)) error = vfs_dq_transfer(inode, attr) ? -EDQUOT : 0; if (!error) diff -Nur kernel-2.6.32.54/linux-2.6.32/fs/binfmt_aout.c kernel-2.6.32.54.vs/linux-2.6.32/fs/binfmt_aout.c --- kernel-2.6.32.54/linux-2.6.32/fs/binfmt_aout.c 2012-01-16 15:01:39.260727742 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/fs/binfmt_aout.c 2012-01-16 14:51:21.765409233 +0100 @@ -24,6 +24,7 @@ #include #include #include +#include #include #include diff -Nur kernel-2.6.32.54/linux-2.6.32/fs/binfmt_elf.c kernel-2.6.32.54.vs/linux-2.6.32/fs/binfmt_elf.c --- kernel-2.6.32.54/linux-2.6.32/fs/binfmt_elf.c 2012-01-16 15:01:39.260727742 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/fs/binfmt_elf.c 2012-01-16 14:51:21.765409233 +0100 @@ -31,6 +31,7 @@ #include #include #include +#include #include #include #include diff -Nur kernel-2.6.32.54/linux-2.6.32/fs/binfmt_flat.c kernel-2.6.32.54.vs/linux-2.6.32/fs/binfmt_flat.c --- kernel-2.6.32.54/linux-2.6.32/fs/binfmt_flat.c 2012-01-16 15:01:39.264727728 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/fs/binfmt_flat.c 2012-01-16 14:51:21.765409233 +0100 @@ -35,6 +35,7 @@ #include #include #include +#include #include #include diff -Nur kernel-2.6.32.54/linux-2.6.32/fs/binfmt_som.c kernel-2.6.32.54.vs/linux-2.6.32/fs/binfmt_som.c --- kernel-2.6.32.54/linux-2.6.32/fs/binfmt_som.c 2012-01-16 15:01:39.264727728 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/fs/binfmt_som.c 2012-01-16 14:51:21.765409233 +0100 @@ -28,6 +28,7 @@ #include #include #include +#include #include #include diff -Nur kernel-2.6.32.54/linux-2.6.32/fs/block_dev.c kernel-2.6.32.54.vs/linux-2.6.32/fs/block_dev.c --- kernel-2.6.32.54/linux-2.6.32/fs/block_dev.c 2012-01-16 15:01:40.264724188 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/fs/block_dev.c 2012-01-16 14:51:21.765409233 +0100 @@ -26,6 +26,7 @@ #include #include #include +#include #include #include "internal.h" @@ -644,6 +645,7 @@ bdev->bd_invalidated = 0; inode->i_mode = S_IFBLK; inode->i_rdev = dev; + inode->i_mdev = dev; inode->i_bdev = bdev; inode->i_data.a_ops = &def_blk_aops; mapping_set_gfp_mask(&inode->i_data, GFP_USER); @@ 
-690,6 +692,11 @@ static struct block_device *bd_acquire(struct inode *inode) { struct block_device *bdev; + dev_t mdev; + + if (!vs_map_blkdev(inode->i_rdev, &mdev, DATTR_OPEN)) + return NULL; + inode->i_mdev = mdev; spin_lock(&bdev_lock); bdev = inode->i_bdev; @@ -700,7 +707,7 @@ } spin_unlock(&bdev_lock); - bdev = bdget(inode->i_rdev); + bdev = bdget(mdev); if (bdev) { spin_lock(&bdev_lock); if (!inode->i_bdev) { diff -Nur kernel-2.6.32.54/linux-2.6.32/fs/block_dev.c.orig kernel-2.6.32.54.vs/linux-2.6.32/fs/block_dev.c.orig --- kernel-2.6.32.54/linux-2.6.32/fs/block_dev.c.orig 2012-01-16 15:01:39.264727728 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/fs/block_dev.c.orig 2012-01-16 14:47:19.462254899 +0100 @@ -335,6 +335,93 @@ } EXPORT_SYMBOL(thaw_bdev); +#ifdef CONFIG_FS_FREEZER_DEBUG +#define FS_PRINTK(fmt, args...) printk(fmt, ## args) +#else +#define FS_PRINTK(fmt, args...) +#endif + +/* #define DEBUG_FS_FREEZING */ + +/** + * freeze_filesystems - lock all filesystems and force them into a consistent + * state + * @which: What combination of fuse & non-fuse to freeze. + */ +void freeze_filesystems(int which) +{ + struct super_block *sb; + + lockdep_off(); + + /* + * Freeze in reverse order so filesystems dependant upon others are + * frozen in the right order (eg. loopback on ext3). + */ + list_for_each_entry_reverse(sb, &super_blocks, s_list) { + FS_PRINTK(KERN_INFO "Considering %s.%s: (root %p, bdev %x)", + sb->s_type->name ? sb->s_type->name : "?", + sb->s_subtype ? sb->s_subtype : "", sb->s_root, + sb->s_bdev ? sb->s_bdev->bd_dev : 0); + + if (sb->s_type->fs_flags & FS_IS_FUSE && + sb->s_frozen == SB_UNFROZEN && + which & FS_FREEZER_FUSE) { + sb->s_frozen = SB_FREEZE_TRANS; + sb->s_flags |= MS_FROZEN; + FS_PRINTK("Fuse filesystem done.\n"); + continue; + } + + if (!sb->s_root || !sb->s_bdev || + (sb->s_frozen == SB_FREEZE_TRANS) || + (sb->s_flags & MS_RDONLY) || + (sb->s_flags & MS_FROZEN) || + !(which & FS_FREEZER_NORMAL)) { + FS_PRINTK(KERN_INFO "Nope.\n"); + continue; + } + + FS_PRINTK(KERN_INFO "Freezing %x... ", sb->s_bdev->bd_dev); + freeze_bdev(sb->s_bdev); + sb->s_flags |= MS_FROZEN; + FS_PRINTK(KERN_INFO "Done.\n"); + } + + lockdep_on(); +} + +/** + * thaw_filesystems - unlock all filesystems + * @which: What combination of fuse & non-fuse to thaw. 
+ */ +void thaw_filesystems(int which) +{ + struct super_block *sb; + + lockdep_off(); + + list_for_each_entry(sb, &super_blocks, s_list) { + if (!(sb->s_flags & MS_FROZEN)) + continue; + + if (sb->s_type->fs_flags & FS_IS_FUSE) { + if (!(which & FS_FREEZER_FUSE)) + continue; + + sb->s_frozen = SB_UNFROZEN; + } else { + if (!(which & FS_FREEZER_NORMAL)) + continue; + + thaw_bdev(sb->s_bdev, sb); + } + sb->s_flags &= ~MS_FROZEN; + } + + lockdep_on(); +} + static int blkdev_writepage(struct page *page, struct writeback_control *wbc) { return block_write_full_page(page, blkdev_get_block, wbc); diff -Nur kernel-2.6.32.54/linux-2.6.32/fs/btrfs/ctree.h kernel-2.6.32.54.vs/linux-2.6.32/fs/btrfs/ctree.h --- kernel-2.6.32.54/linux-2.6.32/fs/btrfs/ctree.h 2012-01-16 15:01:39.264727728 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/fs/btrfs/ctree.h 2012-01-16 14:51:21.769409219 +0100 @@ -547,11 +547,14 @@ /* modification sequence number for NFS */ __le64 sequence; + __le16 tag; /* * a little future expansion, for more than this we can * just grow the inode item and version it */ - __le64 reserved[4]; + __le16 reserved16; + __le32 reserved32; + __le64 reserved[3]; struct btrfs_timespec atime; struct btrfs_timespec ctime; struct btrfs_timespec mtime; @@ -1162,6 +1165,8 @@ #define BTRFS_MOUNT_NOSSD (1 << 9) #define BTRFS_MOUNT_DISCARD (1 << 10) +#define BTRFS_MOUNT_TAGGED (1 << 24) + #define btrfs_clear_opt(o, opt) ((o) &= ~BTRFS_MOUNT_##opt) #define btrfs_set_opt(o, opt) ((o) |= BTRFS_MOUNT_##opt) #define btrfs_test_opt(root, opt) ((root)->fs_info->mount_opt & \ @@ -1181,6 +1186,10 @@ #define BTRFS_INODE_NOATIME (1 << 9) #define BTRFS_INODE_DIRSYNC (1 << 10) +#define BTRFS_INODE_IXUNLINK (1 << 24) +#define BTRFS_INODE_BARRIER (1 << 25) +#define BTRFS_INODE_COW (1 << 26) + #define BTRFS_INODE_ROOT_ITEM_INIT (1 << 31) @@ -1385,6 +1394,7 @@ BTRFS_SETGET_FUNCS(inode_nlink, struct btrfs_inode_item, nlink, 32); BTRFS_SETGET_FUNCS(inode_uid, struct btrfs_inode_item, uid, 32); BTRFS_SETGET_FUNCS(inode_gid, struct btrfs_inode_item, gid, 32); +BTRFS_SETGET_FUNCS(inode_tag, struct btrfs_inode_item, tag, 16); BTRFS_SETGET_FUNCS(inode_mode, struct btrfs_inode_item, mode, 32); BTRFS_SETGET_FUNCS(inode_rdev, struct btrfs_inode_item, rdev, 64); BTRFS_SETGET_FUNCS(inode_flags, struct btrfs_inode_item, flags, 64); @@ -2360,6 +2370,7 @@ long btrfs_ioctl(struct file *file, unsigned int cmd, unsigned long arg); void btrfs_update_iflags(struct inode *inode); void btrfs_inherit_iflags(struct inode *inode, struct inode *dir); +int btrfs_sync_flags(struct inode *inode, int, int); /* file.c */ int btrfs_sync_file(struct file *file, struct dentry *dentry, int datasync); diff -Nur kernel-2.6.32.54/linux-2.6.32/fs/btrfs/disk-io.c kernel-2.6.32.54.vs/linux-2.6.32/fs/btrfs/disk-io.c --- kernel-2.6.32.54/linux-2.6.32/fs/btrfs/disk-io.c 2012-01-16 15:01:39.264727728 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/fs/btrfs/disk-io.c 2012-01-16 14:51:21.773409205 +0100 @@ -1728,6 +1728,9 @@ goto fail_iput; } + if (btrfs_test_opt(tree_root, TAGGED)) + sb->s_flags |= MS_TAGGED; + features = btrfs_super_incompat_flags(disk_super) & ~BTRFS_FEATURE_INCOMPAT_SUPP; if (features) { diff -Nur kernel-2.6.32.54/linux-2.6.32/fs/btrfs/inode.c kernel-2.6.32.54.vs/linux-2.6.32/fs/btrfs/inode.c --- kernel-2.6.32.54/linux-2.6.32/fs/btrfs/inode.c 2012-01-16 15:01:39.276727685 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/fs/btrfs/inode.c 2012-01-16 14:51:21.773409205 +0100 @@ -36,6 +36,8 @@ #include #include #include +#include + #include "compat.h" #include 
"ctree.h" #include "disk-io.h" @@ -2263,6 +2265,8 @@ int maybe_acls; u64 alloc_group_block; u32 rdev; + uid_t uid; + gid_t gid; int ret; path = btrfs_alloc_path(); @@ -2279,8 +2283,13 @@ inode->i_mode = btrfs_inode_mode(leaf, inode_item); inode->i_nlink = btrfs_inode_nlink(leaf, inode_item); - inode->i_uid = btrfs_inode_uid(leaf, inode_item); - inode->i_gid = btrfs_inode_gid(leaf, inode_item); + + uid = btrfs_inode_uid(leaf, inode_item); + gid = btrfs_inode_gid(leaf, inode_item); + inode->i_uid = INOTAG_UID(DX_TAG(inode), uid, gid); + inode->i_gid = INOTAG_GID(DX_TAG(inode), uid, gid); + inode->i_tag = INOTAG_TAG(DX_TAG(inode), uid, gid, + btrfs_inode_tag(leaf, inode_item)); btrfs_i_size_write(inode, btrfs_inode_size(leaf, inode_item)); tspec = btrfs_inode_atime(inode_item); @@ -2362,8 +2371,15 @@ struct btrfs_inode_item *item, struct inode *inode) { - btrfs_set_inode_uid(leaf, item, inode->i_uid); - btrfs_set_inode_gid(leaf, item, inode->i_gid); + uid_t uid = TAGINO_UID(DX_TAG(inode), inode->i_uid, inode->i_tag); + gid_t gid = TAGINO_GID(DX_TAG(inode), inode->i_gid, inode->i_tag); + + btrfs_set_inode_uid(leaf, item, uid); + btrfs_set_inode_gid(leaf, item, gid); +#ifdef CONFIG_TAGGING_INTERN + btrfs_set_inode_tag(leaf, item, inode->i_tag); +#endif + btrfs_set_inode_size(leaf, item, BTRFS_I(inode)->disk_i_size); btrfs_set_inode_mode(leaf, item, inode->i_mode); btrfs_set_inode_nlink(leaf, item, inode->i_nlink); @@ -4153,6 +4169,7 @@ } else inode->i_gid = current_fsgid(); + inode->i_tag = dx_current_fstag(root->fs_info->sb); inode->i_mode = mode; inode->i_ino = objectid; inode_set_bytes(inode, 0); @@ -5954,6 +5971,7 @@ .listxattr = btrfs_listxattr, .removexattr = btrfs_removexattr, .permission = btrfs_permission, + .sync_flags = btrfs_sync_flags, }; static const struct inode_operations btrfs_dir_ro_inode_operations = { .lookup = btrfs_lookup, @@ -6029,6 +6047,7 @@ .permission = btrfs_permission, .fallocate = btrfs_fallocate, .fiemap = btrfs_fiemap, + .sync_flags = btrfs_sync_flags, }; static const struct inode_operations btrfs_special_inode_operations = { .getattr = btrfs_getattr, diff -Nur kernel-2.6.32.54/linux-2.6.32/fs/btrfs/ioctl.c kernel-2.6.32.54.vs/linux-2.6.32/fs/btrfs/ioctl.c --- kernel-2.6.32.54/linux-2.6.32/fs/btrfs/ioctl.c 2012-01-16 15:01:39.276727685 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/fs/btrfs/ioctl.c 2012-01-16 14:51:21.773409205 +0100 @@ -67,10 +67,13 @@ { unsigned int iflags = 0; - if (flags & BTRFS_INODE_SYNC) - iflags |= FS_SYNC_FL; if (flags & BTRFS_INODE_IMMUTABLE) iflags |= FS_IMMUTABLE_FL; + if (flags & BTRFS_INODE_IXUNLINK) + iflags |= FS_IXUNLINK_FL; + + if (flags & BTRFS_INODE_SYNC) + iflags |= FS_SYNC_FL; if (flags & BTRFS_INODE_APPEND) iflags |= FS_APPEND_FL; if (flags & BTRFS_INODE_NODUMP) @@ -80,28 +83,78 @@ if (flags & BTRFS_INODE_DIRSYNC) iflags |= FS_DIRSYNC_FL; + if (flags & BTRFS_INODE_BARRIER) + iflags |= FS_BARRIER_FL; + if (flags & BTRFS_INODE_COW) + iflags |= FS_COW_FL; return iflags; } /* - * Update inode->i_flags based on the btrfs internal flags. + * Update inode->i_(v)flags based on the btrfs internal flags. 
*/ void btrfs_update_iflags(struct inode *inode) { struct btrfs_inode *ip = BTRFS_I(inode); - inode->i_flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC); + inode->i_flags &= ~(S_IMMUTABLE | S_IXUNLINK | + S_SYNC | S_APPEND | S_NOATIME | S_DIRSYNC); - if (ip->flags & BTRFS_INODE_SYNC) - inode->i_flags |= S_SYNC; if (ip->flags & BTRFS_INODE_IMMUTABLE) inode->i_flags |= S_IMMUTABLE; + if (ip->flags & BTRFS_INODE_IXUNLINK) + inode->i_flags |= S_IXUNLINK; + + if (ip->flags & BTRFS_INODE_SYNC) + inode->i_flags |= S_SYNC; if (ip->flags & BTRFS_INODE_APPEND) inode->i_flags |= S_APPEND; if (ip->flags & BTRFS_INODE_NOATIME) inode->i_flags |= S_NOATIME; if (ip->flags & BTRFS_INODE_DIRSYNC) inode->i_flags |= S_DIRSYNC; + + inode->i_vflags &= ~(V_BARRIER | V_COW); + + if (ip->flags & BTRFS_INODE_BARRIER) + inode->i_vflags |= V_BARRIER; + if (ip->flags & BTRFS_INODE_COW) + inode->i_vflags |= V_COW; +} + +/* + * Update btrfs internal flags from inode->i_(v)flags. + */ +void btrfs_update_flags(struct inode *inode) +{ + struct btrfs_inode *ip = BTRFS_I(inode); + + unsigned int flags = inode->i_flags; + unsigned int vflags = inode->i_vflags; + + ip->flags &= ~(BTRFS_INODE_SYNC | BTRFS_INODE_APPEND | + BTRFS_INODE_IMMUTABLE | BTRFS_INODE_IXUNLINK | + BTRFS_INODE_NOATIME | BTRFS_INODE_DIRSYNC | + BTRFS_INODE_BARRIER | BTRFS_INODE_COW); + + if (flags & S_IMMUTABLE) + ip->flags |= BTRFS_INODE_IMMUTABLE; + if (flags & S_IXUNLINK) + ip->flags |= BTRFS_INODE_IXUNLINK; + + if (flags & S_SYNC) + ip->flags |= BTRFS_INODE_SYNC; + if (flags & S_APPEND) + ip->flags |= BTRFS_INODE_APPEND; + if (flags & S_NOATIME) + ip->flags |= BTRFS_INODE_NOATIME; + if (flags & S_DIRSYNC) + ip->flags |= BTRFS_INODE_DIRSYNC; + + if (vflags & V_BARRIER) + ip->flags |= BTRFS_INODE_BARRIER; + if (vflags & V_COW) + ip->flags |= BTRFS_INODE_COW; } /* @@ -119,7 +172,7 @@ flags = BTRFS_I(dir)->flags; if (S_ISREG(inode->i_mode)) - flags &= ~BTRFS_INODE_DIRSYNC; + flags &= ~(BTRFS_INODE_DIRSYNC | BTRFS_INODE_BARRIER); else if (!S_ISDIR(inode->i_mode)) flags &= (BTRFS_INODE_NODUMP | BTRFS_INODE_NOATIME); @@ -127,6 +180,30 @@ btrfs_update_iflags(inode); } +int btrfs_sync_flags(struct inode *inode, int flags, int vflags) +{ + struct btrfs_inode *ip = BTRFS_I(inode); + struct btrfs_root *root = ip->root; + struct btrfs_trans_handle *trans; + int ret; + + trans = btrfs_join_transaction(root, 1); + BUG_ON(!trans); + + inode->i_flags = flags; + inode->i_vflags = vflags; + btrfs_update_flags(inode); + + ret = btrfs_update_inode(trans, root, inode); + BUG_ON(ret); + + btrfs_update_iflags(inode); + inode->i_ctime = CURRENT_TIME; + btrfs_end_transaction(trans, root); + + return 0; +} + static int btrfs_ioctl_getflags(struct file *file, void __user *arg) { struct btrfs_inode *ip = BTRFS_I(file->f_path.dentry->d_inode); @@ -149,6 +226,7 @@ if (copy_from_user(&flags, arg, sizeof(flags))) return -EFAULT; + /* maybe add FS_IXUNLINK_FL ? 
*/ if (flags & ~(FS_IMMUTABLE_FL | FS_APPEND_FL | \ FS_NOATIME_FL | FS_NODUMP_FL | \ FS_SYNC_FL | FS_DIRSYNC_FL)) @@ -161,7 +239,8 @@ flags = btrfs_mask_flags(inode->i_mode, flags); oldflags = btrfs_flags_to_ioctl(ip->flags); - if ((flags ^ oldflags) & (FS_APPEND_FL | FS_IMMUTABLE_FL)) { + if ((flags ^ oldflags) & (FS_APPEND_FL | + FS_IMMUTABLE_FL | FS_IXUNLINK_FL)) { if (!capable(CAP_LINUX_IMMUTABLE)) { ret = -EPERM; goto out_unlock; @@ -172,14 +251,19 @@ if (ret) goto out_unlock; - if (flags & FS_SYNC_FL) - ip->flags |= BTRFS_INODE_SYNC; - else - ip->flags &= ~BTRFS_INODE_SYNC; if (flags & FS_IMMUTABLE_FL) ip->flags |= BTRFS_INODE_IMMUTABLE; else ip->flags &= ~BTRFS_INODE_IMMUTABLE; + if (flags & FS_IXUNLINK_FL) + ip->flags |= BTRFS_INODE_IXUNLINK; + else + ip->flags &= ~BTRFS_INODE_IXUNLINK; + + if (flags & FS_SYNC_FL) + ip->flags |= BTRFS_INODE_SYNC; + else + ip->flags &= ~BTRFS_INODE_SYNC; if (flags & FS_APPEND_FL) ip->flags |= BTRFS_INODE_APPEND; else diff -Nur kernel-2.6.32.54/linux-2.6.32/fs/btrfs/super.c kernel-2.6.32.54.vs/linux-2.6.32/fs/btrfs/super.c --- kernel-2.6.32.54/linux-2.6.32/fs/btrfs/super.c 2012-01-16 15:01:39.276727685 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/fs/btrfs/super.c 2012-01-16 14:51:21.773409205 +0100 @@ -67,7 +67,7 @@ Opt_max_extent, Opt_max_inline, Opt_alloc_start, Opt_nobarrier, Opt_ssd, Opt_nossd, Opt_ssd_spread, Opt_thread_pool, Opt_noacl, Opt_compress, Opt_notreelog, Opt_ratio, Opt_flushoncommit, - Opt_discard, Opt_err, + Opt_tag, Opt_notag, Opt_tagid, Opt_discard, Opt_err, }; static match_table_t tokens = { @@ -90,6 +90,9 @@ {Opt_flushoncommit, "flushoncommit"}, {Opt_ratio, "metadata_ratio=%d"}, {Opt_discard, "discard"}, + {Opt_tag, "tag"}, + {Opt_notag, "notag"}, + {Opt_tagid, "tagid=%u"}, {Opt_err, NULL}, }; @@ -264,6 +267,22 @@ case Opt_discard: btrfs_set_opt(info->mount_opt, DISCARD); break; +#ifndef CONFIG_TAGGING_NONE + case Opt_tag: + printk(KERN_INFO "btrfs: use tagging\n"); + btrfs_set_opt(info->mount_opt, TAGGED); + break; + case Opt_notag: + printk(KERN_INFO "btrfs: disabled tagging\n"); + btrfs_clear_opt(info->mount_opt, TAGGED); + break; +#endif +#ifdef CONFIG_PROPAGATE + case Opt_tagid: + /* use args[0] */ + btrfs_set_opt(info->mount_opt, TAGGED); + break; +#endif case Opt_err: printk(KERN_INFO "btrfs: unrecognized mount option " "'%s'\n", p); @@ -585,6 +604,12 @@ if (ret) return -EINVAL; + if (btrfs_test_opt(root, TAGGED) && !(sb->s_flags & MS_TAGGED)) { + printk("btrfs: %s: tagging not permitted on remount.\n", + sb->s_id); + return -EINVAL; + } + if ((*flags & MS_RDONLY) == (sb->s_flags & MS_RDONLY)) return 0; diff -Nur kernel-2.6.32.54/linux-2.6.32/fs/char_dev.c kernel-2.6.32.54.vs/linux-2.6.32/fs/char_dev.c --- kernel-2.6.32.54/linux-2.6.32/fs/char_dev.c 2012-01-16 15:01:39.284727657 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/fs/char_dev.c 2012-01-16 14:51:21.781409177 +0100 @@ -20,6 +20,8 @@ #include #include #include +#include +#include #include "internal.h" @@ -370,14 +372,21 @@ struct cdev *p; struct cdev *new = NULL; int ret = 0; + dev_t mdev; + + if (!vs_map_chrdev(inode->i_rdev, &mdev, DATTR_OPEN)) + return -EPERM; + inode->i_mdev = mdev; spin_lock(&cdev_lock); p = inode->i_cdev; if (!p) { struct kobject *kobj; int idx; + spin_unlock(&cdev_lock); - kobj = kobj_lookup(cdev_map, inode->i_rdev, &idx); + + kobj = kobj_lookup(cdev_map, mdev, &idx); if (!kobj) return -ENXIO; new = container_of(kobj, struct cdev, kobj); diff -Nur kernel-2.6.32.54/linux-2.6.32/fs/dcache.c kernel-2.6.32.54.vs/linux-2.6.32/fs/dcache.c --- 
kernel-2.6.32.54/linux-2.6.32/fs/dcache.c 2012-01-16 15:01:39.284727657 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/fs/dcache.c 2012-01-16 14:51:21.781409177 +0100 @@ -33,6 +33,7 @@ #include #include #include +#include #include "internal.h" int sysctl_vfs_cache_pressure __read_mostly = 100; @@ -230,6 +231,8 @@ return; } + vx_dentry_dec(dentry); + /* * AV: ->d_delete() is _NOT_ allowed to block now. */ @@ -321,6 +324,7 @@ { atomic_inc(&dentry->d_count); dentry_lru_del_init(dentry); + vx_dentry_inc(dentry); return dentry; } @@ -919,6 +923,9 @@ struct dentry *dentry; char *dname; + if (!vx_dentry_avail(1)) + return NULL; + dentry = kmem_cache_alloc(dentry_cache, GFP_KERNEL); if (!dentry) return NULL; @@ -964,6 +971,7 @@ if (parent) list_add(&dentry->d_u.d_child, &parent->d_subdirs); dentry_stat.nr_dentry++; + vx_dentry_inc(dentry); spin_unlock(&dcache_lock); return dentry; @@ -1410,6 +1418,7 @@ } atomic_inc(&dentry->d_count); + vx_dentry_inc(dentry); found = dentry; spin_unlock(&dentry->d_lock); break; diff -Nur kernel-2.6.32.54/linux-2.6.32/fs/devpts/inode.c kernel-2.6.32.54.vs/linux-2.6.32/fs/devpts/inode.c --- kernel-2.6.32.54/linux-2.6.32/fs/devpts/inode.c 2012-01-16 15:01:39.296727614 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/fs/devpts/inode.c 2012-01-16 14:51:21.781409177 +0100 @@ -24,6 +24,7 @@ #include #include #include +#include #define DEVPTS_DEFAULT_MODE 0600 /* @@ -35,6 +36,20 @@ #define DEVPTS_DEFAULT_PTMX_MODE 0000 #define PTMX_MINOR 2 +static int devpts_permission(struct inode *inode, int mask) +{ + int ret = -EACCES; + + /* devpts is xid tagged */ + if (vx_check((xid_t)inode->i_tag, VS_WATCH_P | VS_IDENT)) + ret = generic_permission(inode, mask, NULL); + return ret; +} + +static struct inode_operations devpts_file_inode_operations = { + .permission = devpts_permission, +}; + extern int pty_limit; /* Config limit on Unix98 ptys */ static DEFINE_MUTEX(allocated_ptys_lock); @@ -262,6 +277,25 @@ return 0; } +static int devpts_filter(struct dentry *de) +{ + /* devpts is xid tagged */ + return vx_check((xid_t)de->d_inode->i_tag, VS_WATCH_P | VS_IDENT); +} + +static int devpts_readdir(struct file * filp, void * dirent, filldir_t filldir) +{ + return dcache_readdir_filter(filp, dirent, filldir, devpts_filter); +} + +static struct file_operations devpts_dir_operations = { + .open = dcache_dir_open, + .release = dcache_dir_close, + .llseek = dcache_dir_lseek, + .read = generic_read_dir, + .readdir = devpts_readdir, +}; + static const struct super_operations devpts_sops = { .statfs = simple_statfs, .remount_fs = devpts_remount, @@ -301,12 +335,15 @@ inode = new_inode(s); if (!inode) goto free_fsi; + inode->i_ino = 1; inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME; inode->i_mode = S_IFDIR | S_IRUGO | S_IXUGO | S_IWUSR; inode->i_op = &simple_dir_inode_operations; - inode->i_fop = &simple_dir_operations; + inode->i_fop = &devpts_dir_operations; inode->i_nlink = 2; + /* devpts is xid tagged */ + inode->i_tag = (tag_t)vx_current_xid(); s->s_root = d_alloc_root(inode); if (s->s_root) @@ -497,6 +534,9 @@ inode->i_gid = opts->setgid ? 
opts->gid : current_fsgid(); inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME; init_special_inode(inode, S_IFCHR|opts->mode, device); + /* devpts is xid tagged */ + inode->i_tag = (tag_t)vx_current_xid(); + inode->i_op = &devpts_file_inode_operations; inode->i_private = tty; tty->driver_data = inode; diff -Nur kernel-2.6.32.54/linux-2.6.32/fs/exec.c kernel-2.6.32.54.vs/linux-2.6.32/fs/exec.c --- kernel-2.6.32.54/linux-2.6.32/fs/exec.c 2012-01-16 15:01:39.308727572 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/fs/exec.c 2012-01-16 14:51:21.781409177 +0100 @@ -273,7 +273,9 @@ if (err) goto err; - mm->stack_vm = mm->total_vm = 1; + mm->total_vm = 0; + vx_vmpages_inc(mm); + mm->stack_vm = 1; up_write(&mm->mmap_sem); bprm->p = vma->vm_end - sizeof(void *); return 0; @@ -1534,7 +1536,7 @@ /* UNIX time of coredump */ case 't': { struct timeval tv; - do_gettimeofday(&tv); + vx_gettimeofday(&tv); rc = snprintf(out_ptr, out_end - out_ptr, "%lu", tv.tv_sec); if (rc > out_end - out_ptr) diff -Nur kernel-2.6.32.54/linux-2.6.32/fs/ext2/balloc.c kernel-2.6.32.54.vs/linux-2.6.32/fs/ext2/balloc.c --- kernel-2.6.32.54/linux-2.6.32/fs/ext2/balloc.c 2009-12-03 04:51:21.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/fs/ext2/balloc.c 2012-01-16 14:51:21.781409177 +0100 @@ -701,7 +701,6 @@ start = 0; end = EXT2_BLOCKS_PER_GROUP(sb); } - BUG_ON(start > EXT2_BLOCKS_PER_GROUP(sb)); repeat: diff -Nur kernel-2.6.32.54/linux-2.6.32/fs/ext2/ext2.h kernel-2.6.32.54.vs/linux-2.6.32/fs/ext2/ext2.h --- kernel-2.6.32.54/linux-2.6.32/fs/ext2/ext2.h 2009-12-03 04:51:21.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/fs/ext2/ext2.h 2012-01-16 14:51:21.785409163 +0100 @@ -131,6 +131,7 @@ int __ext2_write_begin(struct file *file, struct address_space *mapping, loff_t pos, unsigned len, unsigned flags, struct page **pagep, void **fsdata); +extern int ext2_sync_flags(struct inode *, int, int); /* ioctl.c */ extern long ext2_ioctl(struct file *, unsigned int, unsigned long); diff -Nur kernel-2.6.32.54/linux-2.6.32/fs/ext2/file.c kernel-2.6.32.54.vs/linux-2.6.32/fs/ext2/file.c --- kernel-2.6.32.54/linux-2.6.32/fs/ext2/file.c 2009-12-03 04:51:21.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/fs/ext2/file.c 2012-01-16 14:51:21.785409163 +0100 @@ -87,4 +87,5 @@ .setattr = ext2_setattr, .check_acl = ext2_check_acl, .fiemap = ext2_fiemap, + .sync_flags = ext2_sync_flags, }; diff -Nur kernel-2.6.32.54/linux-2.6.32/fs/ext2/ialloc.c kernel-2.6.32.54.vs/linux-2.6.32/fs/ext2/ialloc.c --- kernel-2.6.32.54/linux-2.6.32/fs/ext2/ialloc.c 2009-12-03 04:51:21.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/fs/ext2/ialloc.c 2012-01-16 14:51:21.785409163 +0100 @@ -17,6 +17,7 @@ #include #include #include +#include #include "ext2.h" #include "xattr.h" #include "acl.h" @@ -560,6 +561,7 @@ } else inode->i_gid = current_fsgid(); inode->i_mode = mode; + inode->i_tag = dx_current_fstag(sb); inode->i_ino = ino; inode->i_blocks = 0; diff -Nur kernel-2.6.32.54/linux-2.6.32/fs/ext2/inode.c kernel-2.6.32.54.vs/linux-2.6.32/fs/ext2/inode.c --- kernel-2.6.32.54/linux-2.6.32/fs/ext2/inode.c 2009-12-03 04:51:21.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/fs/ext2/inode.c 2012-01-16 14:51:21.785409163 +0100 @@ -33,6 +33,7 @@ #include #include #include +#include #include "ext2.h" #include "acl.h" #include "xip.h" @@ -1040,7 +1041,7 @@ return; if (ext2_inode_is_fast_symlink(inode)) return; - if (IS_APPEND(inode) || IS_IMMUTABLE(inode)) + if (IS_APPEND(inode) || IS_IXORUNLINK(inode)) return; blocksize = 
inode->i_sb->s_blocksize; @@ -1178,36 +1179,61 @@ { unsigned int flags = EXT2_I(inode)->i_flags; - inode->i_flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC); + inode->i_flags &= ~(S_IMMUTABLE | S_IXUNLINK | + S_SYNC | S_APPEND | S_NOATIME | S_DIRSYNC); + + + if (flags & EXT2_IMMUTABLE_FL) + inode->i_flags |= S_IMMUTABLE; + if (flags & EXT2_IXUNLINK_FL) + inode->i_flags |= S_IXUNLINK; + if (flags & EXT2_SYNC_FL) inode->i_flags |= S_SYNC; if (flags & EXT2_APPEND_FL) inode->i_flags |= S_APPEND; - if (flags & EXT2_IMMUTABLE_FL) - inode->i_flags |= S_IMMUTABLE; if (flags & EXT2_NOATIME_FL) inode->i_flags |= S_NOATIME; if (flags & EXT2_DIRSYNC_FL) inode->i_flags |= S_DIRSYNC; + + inode->i_vflags &= ~(V_BARRIER | V_COW); + + if (flags & EXT2_BARRIER_FL) + inode->i_vflags |= V_BARRIER; + if (flags & EXT2_COW_FL) + inode->i_vflags |= V_COW; } /* Propagate flags from i_flags to EXT2_I(inode)->i_flags */ void ext2_get_inode_flags(struct ext2_inode_info *ei) { unsigned int flags = ei->vfs_inode.i_flags; + unsigned int vflags = ei->vfs_inode.i_vflags; + + ei->i_flags &= ~(EXT2_SYNC_FL | EXT2_APPEND_FL | + EXT2_IMMUTABLE_FL | EXT2_IXUNLINK_FL | + EXT2_NOATIME_FL | EXT2_DIRSYNC_FL | + EXT2_BARRIER_FL | EXT2_COW_FL); + + if (flags & S_IMMUTABLE) + ei->i_flags |= EXT2_IMMUTABLE_FL; + if (flags & S_IXUNLINK) + ei->i_flags |= EXT2_IXUNLINK_FL; - ei->i_flags &= ~(EXT2_SYNC_FL|EXT2_APPEND_FL| - EXT2_IMMUTABLE_FL|EXT2_NOATIME_FL|EXT2_DIRSYNC_FL); if (flags & S_SYNC) ei->i_flags |= EXT2_SYNC_FL; if (flags & S_APPEND) ei->i_flags |= EXT2_APPEND_FL; - if (flags & S_IMMUTABLE) - ei->i_flags |= EXT2_IMMUTABLE_FL; if (flags & S_NOATIME) ei->i_flags |= EXT2_NOATIME_FL; if (flags & S_DIRSYNC) ei->i_flags |= EXT2_DIRSYNC_FL; + + if (vflags & V_BARRIER) + ei->i_flags |= EXT2_BARRIER_FL; + if (vflags & V_COW) + ei->i_flags |= EXT2_COW_FL; } struct inode *ext2_iget (struct super_block *sb, unsigned long ino) @@ -1217,6 +1243,8 @@ struct ext2_inode *raw_inode; struct inode *inode; long ret = -EIO; + uid_t uid; + gid_t gid; int n; inode = iget_locked(sb, ino); @@ -1235,12 +1263,17 @@ } inode->i_mode = le16_to_cpu(raw_inode->i_mode); - inode->i_uid = (uid_t)le16_to_cpu(raw_inode->i_uid_low); - inode->i_gid = (gid_t)le16_to_cpu(raw_inode->i_gid_low); + uid = (uid_t)le16_to_cpu(raw_inode->i_uid_low); + gid = (gid_t)le16_to_cpu(raw_inode->i_gid_low); if (!(test_opt (inode->i_sb, NO_UID32))) { - inode->i_uid |= le16_to_cpu(raw_inode->i_uid_high) << 16; - inode->i_gid |= le16_to_cpu(raw_inode->i_gid_high) << 16; + uid |= le16_to_cpu(raw_inode->i_uid_high) << 16; + gid |= le16_to_cpu(raw_inode->i_gid_high) << 16; } + inode->i_uid = INOTAG_UID(DX_TAG(inode), uid, gid); + inode->i_gid = INOTAG_GID(DX_TAG(inode), uid, gid); + inode->i_tag = INOTAG_TAG(DX_TAG(inode), uid, gid, + le16_to_cpu(raw_inode->i_raw_tag)); + inode->i_nlink = le16_to_cpu(raw_inode->i_links_count); inode->i_size = le32_to_cpu(raw_inode->i_size); inode->i_atime.tv_sec = (signed)le32_to_cpu(raw_inode->i_atime); @@ -1338,8 +1371,8 @@ struct ext2_inode_info *ei = EXT2_I(inode); struct super_block *sb = inode->i_sb; ino_t ino = inode->i_ino; - uid_t uid = inode->i_uid; - gid_t gid = inode->i_gid; + uid_t uid = TAGINO_UID(DX_TAG(inode), inode->i_uid, inode->i_tag); + gid_t gid = TAGINO_GID(DX_TAG(inode), inode->i_gid, inode->i_tag); struct buffer_head * bh; struct ext2_inode * raw_inode = ext2_get_inode(sb, ino, &bh); int n; @@ -1375,6 +1408,9 @@ raw_inode->i_uid_high = 0; raw_inode->i_gid_high = 0; } +#ifdef CONFIG_TAGGING_INTERN + raw_inode->i_raw_tag = 
cpu_to_le16(inode->i_tag); +#endif raw_inode->i_links_count = cpu_to_le16(inode->i_nlink); raw_inode->i_size = cpu_to_le32(inode->i_size); raw_inode->i_atime = cpu_to_le32(inode->i_atime.tv_sec); @@ -1456,7 +1492,8 @@ if (error) return error; if ((iattr->ia_valid & ATTR_UID && iattr->ia_uid != inode->i_uid) || - (iattr->ia_valid & ATTR_GID && iattr->ia_gid != inode->i_gid)) { + (iattr->ia_valid & ATTR_GID && iattr->ia_gid != inode->i_gid) || + (iattr->ia_valid & ATTR_TAG && iattr->ia_tag != inode->i_tag)) { error = vfs_dq_transfer(inode, iattr) ? -EDQUOT : 0; if (error) return error; diff -Nur kernel-2.6.32.54/linux-2.6.32/fs/ext2/ioctl.c kernel-2.6.32.54.vs/linux-2.6.32/fs/ext2/ioctl.c --- kernel-2.6.32.54/linux-2.6.32/fs/ext2/ioctl.c 2009-12-03 04:51:21.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/fs/ext2/ioctl.c 2012-01-16 14:51:21.789409149 +0100 @@ -17,6 +17,16 @@ #include +int ext2_sync_flags(struct inode *inode, int flags, int vflags) +{ + inode->i_flags = flags; + inode->i_vflags = vflags; + ext2_get_inode_flags(EXT2_I(inode)); + inode->i_ctime = CURRENT_TIME_SEC; + mark_inode_dirty(inode); + return 0; +} + long ext2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { struct inode *inode = filp->f_dentry->d_inode; @@ -51,6 +61,11 @@ flags = ext2_mask_flags(inode->i_mode, flags); + if (IS_BARRIER(inode)) { + vxwprintk_task(1, "messing with the barrier."); + return -EACCES; + } + mutex_lock(&inode->i_mutex); /* Is it quota file? Do not allow user to mess with it */ if (IS_NOQUOTA(inode)) { @@ -66,7 +81,9 @@ * * This test looks nicer. Thanks to Pauline Middelink */ - if ((flags ^ oldflags) & (EXT2_APPEND_FL | EXT2_IMMUTABLE_FL)) { + if ((oldflags & EXT2_IMMUTABLE_FL) || + ((flags ^ oldflags) & (EXT2_APPEND_FL | + EXT2_IMMUTABLE_FL | EXT2_IXUNLINK_FL))) { if (!capable(CAP_LINUX_IMMUTABLE)) { mutex_unlock(&inode->i_mutex); ret = -EPERM; @@ -74,7 +91,7 @@ } } - flags = flags & EXT2_FL_USER_MODIFIABLE; + flags &= EXT2_FL_USER_MODIFIABLE; flags |= oldflags & ~EXT2_FL_USER_MODIFIABLE; ei->i_flags = flags; mutex_unlock(&inode->i_mutex); diff -Nur kernel-2.6.32.54/linux-2.6.32/fs/ext2/namei.c kernel-2.6.32.54.vs/linux-2.6.32/fs/ext2/namei.c --- kernel-2.6.32.54/linux-2.6.32/fs/ext2/namei.c 2012-01-16 15:01:39.308727572 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/fs/ext2/namei.c 2012-01-16 14:51:21.793409135 +0100 @@ -31,6 +31,7 @@ */ #include +#include #include "ext2.h" #include "xattr.h" #include "acl.h" @@ -74,6 +75,7 @@ return ERR_PTR(-EIO); } else { return ERR_CAST(inode); + dx_propagate_tag(nd, inode); } } } @@ -396,6 +398,7 @@ #endif .setattr = ext2_setattr, .check_acl = ext2_check_acl, + .sync_flags = ext2_sync_flags, }; const struct inode_operations ext2_special_inode_operations = { diff -Nur kernel-2.6.32.54/linux-2.6.32/fs/ext2/super.c kernel-2.6.32.54.vs/linux-2.6.32/fs/ext2/super.c --- kernel-2.6.32.54/linux-2.6.32/fs/ext2/super.c 2009-12-03 04:51:21.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/fs/ext2/super.c 2012-01-16 14:51:21.793409135 +0100 @@ -382,7 +382,8 @@ Opt_err_ro, Opt_nouid32, Opt_nocheck, Opt_debug, Opt_oldalloc, Opt_orlov, Opt_nobh, Opt_user_xattr, Opt_nouser_xattr, Opt_acl, Opt_noacl, Opt_xip, Opt_ignore, Opt_err, Opt_quota, - Opt_usrquota, Opt_grpquota, Opt_reservation, Opt_noreservation + Opt_usrquota, Opt_grpquota, Opt_reservation, Opt_noreservation, + Opt_tag, Opt_notag, Opt_tagid }; static const match_table_t tokens = { @@ -410,6 +411,9 @@ {Opt_acl, "acl"}, {Opt_noacl, "noacl"}, {Opt_xip, "xip"}, + {Opt_tag, "tag"}, + {Opt_notag, 
"notag"}, + {Opt_tagid, "tagid=%u"}, {Opt_grpquota, "grpquota"}, {Opt_ignore, "noquota"}, {Opt_quota, "quota"}, @@ -480,6 +484,20 @@ case Opt_nouid32: set_opt (sbi->s_mount_opt, NO_UID32); break; +#ifndef CONFIG_TAGGING_NONE + case Opt_tag: + set_opt (sbi->s_mount_opt, TAGGED); + break; + case Opt_notag: + clear_opt (sbi->s_mount_opt, TAGGED); + break; +#endif +#ifdef CONFIG_PROPAGATE + case Opt_tagid: + /* use args[0] */ + set_opt (sbi->s_mount_opt, TAGGED); + break; +#endif case Opt_nocheck: clear_opt (sbi->s_mount_opt, CHECK); break; @@ -829,6 +847,8 @@ if (!parse_options ((char *) data, sbi)) goto failed_mount; + if (EXT2_SB(sb)->s_mount_opt & EXT2_MOUNT_TAGGED) + sb->s_flags |= MS_TAGGED; sb->s_flags = (sb->s_flags & ~MS_POSIXACL) | ((EXT2_SB(sb)->s_mount_opt & EXT2_MOUNT_POSIX_ACL) ? MS_POSIXACL : 0); @@ -1174,6 +1194,14 @@ err = -EINVAL; goto restore_opts; } + + if ((sbi->s_mount_opt & EXT2_MOUNT_TAGGED) && + !(sb->s_flags & MS_TAGGED)) { + printk("EXT2-fs: %s: tagging not permitted on remount.\n", + sb->s_id); + err = -EINVAL; + goto restore_opts; + } sb->s_flags = (sb->s_flags & ~MS_POSIXACL) | ((sbi->s_mount_opt & EXT2_MOUNT_POSIX_ACL) ? MS_POSIXACL : 0); diff -Nur kernel-2.6.32.54/linux-2.6.32/fs/ext3/file.c kernel-2.6.32.54.vs/linux-2.6.32/fs/ext3/file.c --- kernel-2.6.32.54/linux-2.6.32/fs/ext3/file.c 2009-12-03 04:51:21.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/fs/ext3/file.c 2012-01-16 14:51:21.793409135 +0100 @@ -80,5 +80,6 @@ #endif .check_acl = ext3_check_acl, .fiemap = ext3_fiemap, + .sync_flags = ext3_sync_flags, }; diff -Nur kernel-2.6.32.54/linux-2.6.32/fs/ext3/ialloc.c kernel-2.6.32.54.vs/linux-2.6.32/fs/ext3/ialloc.c --- kernel-2.6.32.54/linux-2.6.32/fs/ext3/ialloc.c 2009-12-03 04:51:21.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/fs/ext3/ialloc.c 2012-01-16 14:51:21.793409135 +0100 @@ -23,6 +23,7 @@ #include #include #include +#include #include @@ -548,6 +549,7 @@ } else inode->i_gid = current_fsgid(); inode->i_mode = mode; + inode->i_tag = dx_current_fstag(sb); inode->i_ino = ino; /* This is the optimal IO size (for stat), not the fs block size */ diff -Nur kernel-2.6.32.54/linux-2.6.32/fs/ext3/inode.c kernel-2.6.32.54.vs/linux-2.6.32/fs/ext3/inode.c --- kernel-2.6.32.54/linux-2.6.32/fs/ext3/inode.c 2012-01-16 15:01:39.308727572 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/fs/ext3/inode.c 2012-01-16 14:51:21.797409121 +0100 @@ -38,6 +38,7 @@ #include #include #include +#include #include "xattr.h" #include "acl.h" @@ -2343,7 +2344,7 @@ int ext3_can_truncate(struct inode *inode) { - if (IS_APPEND(inode) || IS_IMMUTABLE(inode)) + if (IS_APPEND(inode) || IS_IXORUNLINK(inode)) return 0; if (S_ISREG(inode->i_mode)) return 1; @@ -2728,36 +2729,60 @@ { unsigned int flags = EXT3_I(inode)->i_flags; - inode->i_flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC); + inode->i_flags &= ~(S_IMMUTABLE | S_IXUNLINK | + S_SYNC | S_APPEND | S_NOATIME | S_DIRSYNC); + + if (flags & EXT3_IMMUTABLE_FL) + inode->i_flags |= S_IMMUTABLE; + if (flags & EXT3_IXUNLINK_FL) + inode->i_flags |= S_IXUNLINK; + if (flags & EXT3_SYNC_FL) inode->i_flags |= S_SYNC; if (flags & EXT3_APPEND_FL) inode->i_flags |= S_APPEND; - if (flags & EXT3_IMMUTABLE_FL) - inode->i_flags |= S_IMMUTABLE; if (flags & EXT3_NOATIME_FL) inode->i_flags |= S_NOATIME; if (flags & EXT3_DIRSYNC_FL) inode->i_flags |= S_DIRSYNC; + + inode->i_vflags &= ~(V_BARRIER | V_COW); + + if (flags & EXT3_BARRIER_FL) + inode->i_vflags |= V_BARRIER; + if (flags & EXT3_COW_FL) + inode->i_vflags |= V_COW; } /* Propagate 
flags from i_flags to EXT3_I(inode)->i_flags */ void ext3_get_inode_flags(struct ext3_inode_info *ei) { unsigned int flags = ei->vfs_inode.i_flags; + unsigned int vflags = ei->vfs_inode.i_vflags; + + ei->i_flags &= ~(EXT3_SYNC_FL | EXT3_APPEND_FL | + EXT3_IMMUTABLE_FL | EXT3_IXUNLINK_FL | + EXT3_NOATIME_FL | EXT3_DIRSYNC_FL | + EXT3_BARRIER_FL | EXT3_COW_FL); + + if (flags & S_IMMUTABLE) + ei->i_flags |= EXT3_IMMUTABLE_FL; + if (flags & S_IXUNLINK) + ei->i_flags |= EXT3_IXUNLINK_FL; - ei->i_flags &= ~(EXT3_SYNC_FL|EXT3_APPEND_FL| - EXT3_IMMUTABLE_FL|EXT3_NOATIME_FL|EXT3_DIRSYNC_FL); if (flags & S_SYNC) ei->i_flags |= EXT3_SYNC_FL; if (flags & S_APPEND) ei->i_flags |= EXT3_APPEND_FL; - if (flags & S_IMMUTABLE) - ei->i_flags |= EXT3_IMMUTABLE_FL; if (flags & S_NOATIME) ei->i_flags |= EXT3_NOATIME_FL; if (flags & S_DIRSYNC) ei->i_flags |= EXT3_DIRSYNC_FL; + + if (vflags & V_BARRIER) + ei->i_flags |= EXT3_BARRIER_FL; + if (vflags & V_COW) + ei->i_flags |= EXT3_COW_FL; } struct inode *ext3_iget(struct super_block *sb, unsigned long ino) @@ -2771,6 +2796,8 @@ transaction_t *transaction; long ret; int block; + uid_t uid; + gid_t gid; inode = iget_locked(sb, ino); if (!inode) @@ -2787,12 +2814,17 @@ bh = iloc.bh; raw_inode = ext3_raw_inode(&iloc); inode->i_mode = le16_to_cpu(raw_inode->i_mode); - inode->i_uid = (uid_t)le16_to_cpu(raw_inode->i_uid_low); - inode->i_gid = (gid_t)le16_to_cpu(raw_inode->i_gid_low); + uid = (uid_t)le16_to_cpu(raw_inode->i_uid_low); + gid = (gid_t)le16_to_cpu(raw_inode->i_gid_low); if(!(test_opt (inode->i_sb, NO_UID32))) { - inode->i_uid |= le16_to_cpu(raw_inode->i_uid_high) << 16; - inode->i_gid |= le16_to_cpu(raw_inode->i_gid_high) << 16; + uid |= le16_to_cpu(raw_inode->i_uid_high) << 16; + gid |= le16_to_cpu(raw_inode->i_gid_high) << 16; } + inode->i_uid = INOTAG_UID(DX_TAG(inode), uid, gid); + inode->i_gid = INOTAG_GID(DX_TAG(inode), uid, gid); + inode->i_tag = INOTAG_TAG(DX_TAG(inode), uid, gid, + le16_to_cpu(raw_inode->i_raw_tag)); + inode->i_nlink = le16_to_cpu(raw_inode->i_links_count); inode->i_size = le32_to_cpu(raw_inode->i_size); inode->i_atime.tv_sec = (signed)le32_to_cpu(raw_inode->i_atime); @@ -2947,6 +2979,8 @@ struct ext3_inode *raw_inode = ext3_raw_inode(iloc); struct ext3_inode_info *ei = EXT3_I(inode); struct buffer_head *bh = iloc->bh; + uid_t uid = TAGINO_UID(DX_TAG(inode), inode->i_uid, inode->i_tag); + gid_t gid = TAGINO_GID(DX_TAG(inode), inode->i_gid, inode->i_tag); int err = 0, rc, block; again: @@ -2961,29 +2995,32 @@ ext3_get_inode_flags(ei); raw_inode->i_mode = cpu_to_le16(inode->i_mode); if(!(test_opt(inode->i_sb, NO_UID32))) { - raw_inode->i_uid_low = cpu_to_le16(low_16_bits(inode->i_uid)); - raw_inode->i_gid_low = cpu_to_le16(low_16_bits(inode->i_gid)); + raw_inode->i_uid_low = cpu_to_le16(low_16_bits(uid)); + raw_inode->i_gid_low = cpu_to_le16(low_16_bits(gid)); /* * Fix up interoperability with old kernels. 
Otherwise, old inodes get * re-used with the upper 16 bits of the uid/gid intact */ if(!ei->i_dtime) { raw_inode->i_uid_high = - cpu_to_le16(high_16_bits(inode->i_uid)); + cpu_to_le16(high_16_bits(uid)); raw_inode->i_gid_high = - cpu_to_le16(high_16_bits(inode->i_gid)); + cpu_to_le16(high_16_bits(gid)); } else { raw_inode->i_uid_high = 0; raw_inode->i_gid_high = 0; } } else { raw_inode->i_uid_low = - cpu_to_le16(fs_high2lowuid(inode->i_uid)); + cpu_to_le16(fs_high2lowuid(uid)); raw_inode->i_gid_low = - cpu_to_le16(fs_high2lowgid(inode->i_gid)); + cpu_to_le16(fs_high2lowgid(gid)); raw_inode->i_uid_high = 0; raw_inode->i_gid_high = 0; } +#ifdef CONFIG_TAGGING_INTERN + raw_inode->i_raw_tag = cpu_to_le16(inode->i_tag); +#endif raw_inode->i_links_count = cpu_to_le16(inode->i_nlink); raw_inode->i_size = cpu_to_le32(ei->i_disksize); raw_inode->i_atime = cpu_to_le32(inode->i_atime.tv_sec); @@ -3141,7 +3178,8 @@ return error; if ((ia_valid & ATTR_UID && attr->ia_uid != inode->i_uid) || - (ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid)) { + (ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid) || + (ia_valid & ATTR_TAG && attr->ia_tag != inode->i_tag)) { handle_t *handle; /* (user+group)*(old+new) structure, inode write (sb, @@ -3163,6 +3201,8 @@ inode->i_uid = attr->ia_uid; if (attr->ia_valid & ATTR_GID) inode->i_gid = attr->ia_gid; + if ((attr->ia_valid & ATTR_TAG) && IS_TAGGED(inode)) + inode->i_tag = attr->ia_tag; error = ext3_mark_inode_dirty(handle, inode); ext3_journal_stop(handle); } diff -Nur kernel-2.6.32.54/linux-2.6.32/fs/ext3/ioctl.c kernel-2.6.32.54.vs/linux-2.6.32/fs/ext3/ioctl.c --- kernel-2.6.32.54/linux-2.6.32/fs/ext3/ioctl.c 2009-12-03 04:51:21.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/fs/ext3/ioctl.c 2012-01-16 14:51:21.797409121 +0100 @@ -8,6 +8,7 @@ */ #include +#include #include #include #include @@ -17,6 +18,34 @@ #include #include + +int ext3_sync_flags(struct inode *inode, int flags, int vflags) +{ + handle_t *handle = NULL; + struct ext3_iloc iloc; + int err; + + handle = ext3_journal_start(inode, 1); + if (IS_ERR(handle)) + return PTR_ERR(handle); + + if (IS_SYNC(inode)) + handle->h_sync = 1; + err = ext3_reserve_inode_write(handle, inode, &iloc); + if (err) + goto flags_err; + + inode->i_flags = flags; + inode->i_vflags = vflags; + ext3_get_inode_flags(EXT3_I(inode)); + inode->i_ctime = CURRENT_TIME_SEC; + + err = ext3_mark_iloc_dirty(handle, inode, &iloc); +flags_err: + ext3_journal_stop(handle); + return err; +} + long ext3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { struct inode *inode = filp->f_dentry->d_inode; @@ -50,6 +79,11 @@ flags = ext3_mask_flags(inode->i_mode, flags); + if (IS_BARRIER(inode)) { + vxwprintk_task(1, "messing with the barrier."); + return -EACCES; + } + mutex_lock(&inode->i_mutex); /* Is it quota file? Do not allow user to mess with it */ @@ -68,7 +102,9 @@ * * This test looks nicer. 
Thanks to Pauline Middelink */ - if ((flags ^ oldflags) & (EXT3_APPEND_FL | EXT3_IMMUTABLE_FL)) { + if ((oldflags & EXT3_IMMUTABLE_FL) || + ((flags ^ oldflags) & (EXT3_APPEND_FL | + EXT3_IMMUTABLE_FL | EXT3_IXUNLINK_FL))) { if (!capable(CAP_LINUX_IMMUTABLE)) goto flags_out; } @@ -93,7 +129,7 @@ if (err) goto flags_err; - flags = flags & EXT3_FL_USER_MODIFIABLE; + flags &= EXT3_FL_USER_MODIFIABLE; flags |= oldflags & ~EXT3_FL_USER_MODIFIABLE; ei->i_flags = flags; diff -Nur kernel-2.6.32.54/linux-2.6.32/fs/ext3/namei.c kernel-2.6.32.54.vs/linux-2.6.32/fs/ext3/namei.c --- kernel-2.6.32.54/linux-2.6.32/fs/ext3/namei.c 2012-01-16 15:01:39.308727572 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/fs/ext3/namei.c 2012-01-16 14:51:21.797409121 +0100 @@ -36,6 +36,7 @@ #include #include #include +#include #include "namei.h" #include "xattr.h" @@ -912,6 +913,7 @@ if (bh) ll_rw_block(READ_META, 1, &bh); } + dx_propagate_tag(nd, inode); } if ((bh = bh_use[ra_ptr++]) == NULL) goto next; @@ -2455,6 +2457,7 @@ .removexattr = generic_removexattr, #endif .check_acl = ext3_check_acl, + .sync_flags = ext3_sync_flags, }; const struct inode_operations ext3_special_inode_operations = { diff -Nur kernel-2.6.32.54/linux-2.6.32/fs/ext3/super.c kernel-2.6.32.54.vs/linux-2.6.32/fs/ext3/super.c --- kernel-2.6.32.54/linux-2.6.32/fs/ext3/super.c 2012-01-16 15:01:39.308727572 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/fs/ext3/super.c 2012-01-16 14:51:21.801409107 +0100 @@ -789,7 +789,7 @@ Opt_usrjquota, Opt_grpjquota, Opt_offusrjquota, Opt_offgrpjquota, Opt_jqfmt_vfsold, Opt_jqfmt_vfsv0, Opt_quota, Opt_noquota, Opt_ignore, Opt_barrier, Opt_err, Opt_resize, Opt_usrquota, - Opt_grpquota + Opt_grpquota, Opt_tag, Opt_notag, Opt_tagid }; static const match_table_t tokens = { @@ -842,6 +842,9 @@ {Opt_usrquota, "usrquota"}, {Opt_barrier, "barrier=%u"}, {Opt_resize, "resize"}, + {Opt_tag, "tag"}, + {Opt_notag, "notag"}, + {Opt_tagid, "tagid=%u"}, {Opt_err, NULL}, }; @@ -934,6 +937,20 @@ case Opt_nouid32: set_opt (sbi->s_mount_opt, NO_UID32); break; +#ifndef CONFIG_TAGGING_NONE + case Opt_tag: + set_opt (sbi->s_mount_opt, TAGGED); + break; + case Opt_notag: + clear_opt (sbi->s_mount_opt, TAGGED); + break; +#endif +#ifdef CONFIG_PROPAGATE + case Opt_tagid: + /* use args[0] */ + set_opt (sbi->s_mount_opt, TAGGED); + break; +#endif case Opt_nocheck: clear_opt (sbi->s_mount_opt, CHECK); break; @@ -1665,6 +1682,9 @@ NULL, 0)) goto failed_mount; + if (EXT3_SB(sb)->s_mount_opt & EXT3_MOUNT_TAGGED) + sb->s_flags |= MS_TAGGED; + sb->s_flags = (sb->s_flags & ~MS_POSIXACL) | ((sbi->s_mount_opt & EXT3_MOUNT_POSIX_ACL) ? MS_POSIXACL : 0); @@ -2534,6 +2554,14 @@ if (sbi->s_mount_opt & EXT3_MOUNT_ABORT) ext3_abort(sb, __func__, "Abort forced by user"); + if ((sbi->s_mount_opt & EXT3_MOUNT_TAGGED) && + !(sb->s_flags & MS_TAGGED)) { + printk("EXT3-fs: %s: tagging not permitted on remount.\n", + sb->s_id); + err = -EINVAL; + goto restore_opts; + } + sb->s_flags = (sb->s_flags & ~MS_POSIXACL) | ((sbi->s_mount_opt & EXT3_MOUNT_POSIX_ACL) ? 
MS_POSIXACL : 0); diff -Nur kernel-2.6.32.54/linux-2.6.32/fs/ext4/ext4.h kernel-2.6.32.54.vs/linux-2.6.32/fs/ext4/ext4.h --- kernel-2.6.32.54/linux-2.6.32/fs/ext4/ext4.h 2012-01-16 15:01:39.508726864 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/fs/ext4/ext4.h 2012-01-16 14:51:21.801409107 +0100 @@ -289,8 +289,12 @@ #define EXT4_EXTENTS_FL 0x00080000 /* Inode uses extents */ #define EXT4_EA_INODE_FL 0x00200000 /* Inode used for large EA */ #define EXT4_EOFBLOCKS_FL 0x00400000 /* Blocks allocated beyond EOF */ +#define EXT4_IXUNLINK_FL 0x08000000 /* Immutable invert on unlink */ #define EXT4_RESERVED_FL 0x80000000 /* reserved for ext4 lib */ +#define EXT4_BARRIER_FL 0x04000000 /* Barrier for chroot() */ +#define EXT4_COW_FL 0x20000000 /* Copy on Write marker */ + #define EXT4_FL_USER_VISIBLE 0x004BDFFF /* User visible flags */ #define EXT4_FL_USER_MODIFIABLE 0x004B80FF /* User modifiable flags */ @@ -551,7 +555,8 @@ __le16 l_i_file_acl_high; __le16 l_i_uid_high; /* these 2 fields */ __le16 l_i_gid_high; /* were reserved2[0] */ - __u32 l_i_reserved2; + __le16 l_i_tag; /* Context Tag */ + __u16 l_i_reserved2; } linux2; struct { __le16 h_i_reserved1; /* Obsoleted fragment number/size which are removed in ext4 */ @@ -665,6 +670,7 @@ #define i_gid_low i_gid #define i_uid_high osd2.linux2.l_i_uid_high #define i_gid_high osd2.linux2.l_i_gid_high +#define i_raw_tag osd2.linux2.l_i_tag #define i_reserved2 osd2.linux2.l_i_reserved2 #elif defined(__GNU__) @@ -839,6 +845,7 @@ #define EXT4_MOUNT_QUOTA 0x80000 /* Some quota option set */ #define EXT4_MOUNT_USRQUOTA 0x100000 /* "old" user quota */ #define EXT4_MOUNT_GRPQUOTA 0x200000 /* "old" group quota */ +#define EXT4_MOUNT_TAGGED 0x400000 /* Enable Context Tags */ #define EXT4_MOUNT_JOURNAL_CHECKSUM 0x800000 /* Journal checksums */ #define EXT4_MOUNT_JOURNAL_ASYNC_COMMIT 0x1000000 /* Journal Async Commit */ #define EXT4_MOUNT_I_VERSION 0x2000000 /* i_version support */ @@ -1864,6 +1871,7 @@ struct buffer_head *bh, int flags); extern int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, __u64 start, __u64 len); +extern int ext4_sync_flags(struct inode *, int, int); /* move_extent.c */ extern int ext4_move_extents(struct file *o_filp, struct file *d_filp, __u64 start_orig, __u64 start_donor, diff -Nur kernel-2.6.32.54/linux-2.6.32/fs/ext4/file.c kernel-2.6.32.54.vs/linux-2.6.32/fs/ext4/file.c --- kernel-2.6.32.54/linux-2.6.32/fs/ext4/file.c 2012-01-16 15:01:39.516726836 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/fs/ext4/file.c 2012-01-16 14:51:21.801409107 +0100 @@ -161,5 +161,6 @@ .check_acl = ext4_check_acl, .fallocate = ext4_fallocate, .fiemap = ext4_fiemap, + .sync_flags = ext4_sync_flags, }; diff -Nur kernel-2.6.32.54/linux-2.6.32/fs/ext4/ialloc.c kernel-2.6.32.54.vs/linux-2.6.32/fs/ext4/ialloc.c --- kernel-2.6.32.54/linux-2.6.32/fs/ext4/ialloc.c 2012-01-16 15:01:39.520726821 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/fs/ext4/ialloc.c 2012-01-16 14:51:21.805409093 +0100 @@ -22,6 +22,7 @@ #include #include #include +#include #include #include "ext4.h" @@ -988,6 +989,7 @@ } else inode->i_gid = current_fsgid(); inode->i_mode = mode; + inode->i_tag = dx_current_fstag(sb); inode->i_ino = ino + group * EXT4_INODES_PER_GROUP(sb); /* This is the optimal IO size (for stat), not the fs block size */ diff -Nur kernel-2.6.32.54/linux-2.6.32/fs/ext4/inode.c kernel-2.6.32.54.vs/linux-2.6.32/fs/ext4/inode.c --- kernel-2.6.32.54/linux-2.6.32/fs/ext4/inode.c 2012-01-16 15:01:39.524726807 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/fs/ext4/inode.c 
2012-01-16 14:51:21.805409093 +0100 @@ -38,6 +38,7 @@ #include #include #include +#include #include "ext4_jbd2.h" #include "xattr.h" @@ -4446,7 +4447,7 @@ int ext4_can_truncate(struct inode *inode) { - if (IS_APPEND(inode) || IS_IMMUTABLE(inode)) + if (IS_APPEND(inode) || IS_IXORUNLINK(inode)) return 0; if (S_ISREG(inode->i_mode)) return 1; @@ -4799,41 +4800,64 @@ { unsigned int flags = EXT4_I(inode)->i_flags; - inode->i_flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC); + inode->i_flags &= ~(S_IMMUTABLE | S_IXUNLINK | + S_SYNC | S_APPEND | S_NOATIME | S_DIRSYNC); + + if (flags & EXT4_IMMUTABLE_FL) + inode->i_flags |= S_IMMUTABLE; + if (flags & EXT4_IXUNLINK_FL) + inode->i_flags |= S_IXUNLINK; + if (flags & EXT4_SYNC_FL) inode->i_flags |= S_SYNC; if (flags & EXT4_APPEND_FL) inode->i_flags |= S_APPEND; - if (flags & EXT4_IMMUTABLE_FL) - inode->i_flags |= S_IMMUTABLE; if (flags & EXT4_NOATIME_FL) inode->i_flags |= S_NOATIME; if (flags & EXT4_DIRSYNC_FL) inode->i_flags |= S_DIRSYNC; + + inode->i_vflags &= ~(V_BARRIER | V_COW); + + if (flags & EXT4_BARRIER_FL) + inode->i_vflags |= V_BARRIER; + if (flags & EXT4_COW_FL) + inode->i_vflags |= V_COW; } /* Propagate flags from i_flags to EXT4_I(inode)->i_flags */ void ext4_get_inode_flags(struct ext4_inode_info *ei) { - unsigned int vfs_fl; + unsigned int vfs_fl, vflags; unsigned long old_fl, new_fl; do { vfs_fl = ei->vfs_inode.i_flags; + vflags = ei->vfs_inode.i_vflags; old_fl = ei->i_flags; - new_fl = old_fl & ~(EXT4_SYNC_FL|EXT4_APPEND_FL| - EXT4_IMMUTABLE_FL|EXT4_NOATIME_FL| - EXT4_DIRSYNC_FL); + new_fl = old_fl & ~(EXT4_SYNC_FL | EXT4_APPEND_FL | + EXT4_IMMUTABLE_FL | EXT4_IXUNLINK_FL | + EXT4_NOATIME_FL | EXT4_DIRSYNC_FL | + EXT4_BARRIER_FL | EXT4_COW_FL); + + if (vfs_fl & S_IMMUTABLE) + new_fl |= EXT4_IMMUTABLE_FL; + if (vfs_fl & S_IXUNLINK) + new_fl |= EXT4_IXUNLINK_FL; + if (vfs_fl & S_SYNC) new_fl |= EXT4_SYNC_FL; if (vfs_fl & S_APPEND) new_fl |= EXT4_APPEND_FL; - if (vfs_fl & S_IMMUTABLE) - new_fl |= EXT4_IMMUTABLE_FL; if (vfs_fl & S_NOATIME) new_fl |= EXT4_NOATIME_FL; if (vfs_fl & S_DIRSYNC) new_fl |= EXT4_DIRSYNC_FL; + + if (vflags & V_BARRIER) + new_fl |= EXT4_BARRIER_FL; + if (vflags & V_COW) + new_fl |= EXT4_COW_FL; } while (cmpxchg(&ei->i_flags, old_fl, new_fl) != old_fl); } @@ -4869,6 +4893,8 @@ journal_t *journal = EXT4_SB(sb)->s_journal; long ret; int block; + uid_t uid; + gid_t gid; inode = iget_locked(sb, ino); if (!inode) @@ -4884,12 +4910,16 @@ goto bad_inode; raw_inode = ext4_raw_inode(&iloc); inode->i_mode = le16_to_cpu(raw_inode->i_mode); - inode->i_uid = (uid_t)le16_to_cpu(raw_inode->i_uid_low); - inode->i_gid = (gid_t)le16_to_cpu(raw_inode->i_gid_low); + uid = (uid_t)le16_to_cpu(raw_inode->i_uid_low); + gid = (gid_t)le16_to_cpu(raw_inode->i_gid_low); if (!(test_opt(inode->i_sb, NO_UID32))) { - inode->i_uid |= le16_to_cpu(raw_inode->i_uid_high) << 16; - inode->i_gid |= le16_to_cpu(raw_inode->i_gid_high) << 16; + uid |= le16_to_cpu(raw_inode->i_uid_high) << 16; + gid |= le16_to_cpu(raw_inode->i_gid_high) << 16; } + inode->i_uid = INOTAG_UID(DX_TAG(inode), uid, gid); + inode->i_gid = INOTAG_GID(DX_TAG(inode), uid, gid); + inode->i_tag = INOTAG_TAG(DX_TAG(inode), uid, gid, + le16_to_cpu(raw_inode->i_raw_tag)); inode->i_nlink = le16_to_cpu(raw_inode->i_links_count); ei->i_state_flags = 0; @@ -5111,6 +5141,8 @@ struct ext4_inode *raw_inode = ext4_raw_inode(iloc); struct ext4_inode_info *ei = EXT4_I(inode); struct buffer_head *bh = iloc->bh; + uid_t uid = TAGINO_UID(DX_TAG(inode), inode->i_uid, inode->i_tag); + gid_t 
gid = TAGINO_GID(DX_TAG(inode), inode->i_gid, inode->i_tag); int err = 0, rc, block; /* For fields not not tracking in the in-memory inode, @@ -5121,29 +5153,32 @@ ext4_get_inode_flags(ei); raw_inode->i_mode = cpu_to_le16(inode->i_mode); if (!(test_opt(inode->i_sb, NO_UID32))) { - raw_inode->i_uid_low = cpu_to_le16(low_16_bits(inode->i_uid)); - raw_inode->i_gid_low = cpu_to_le16(low_16_bits(inode->i_gid)); + raw_inode->i_uid_low = cpu_to_le16(low_16_bits(uid)); + raw_inode->i_gid_low = cpu_to_le16(low_16_bits(gid)); /* * Fix up interoperability with old kernels. Otherwise, old inodes get * re-used with the upper 16 bits of the uid/gid intact */ if (!ei->i_dtime) { raw_inode->i_uid_high = - cpu_to_le16(high_16_bits(inode->i_uid)); + cpu_to_le16(high_16_bits(uid)); raw_inode->i_gid_high = - cpu_to_le16(high_16_bits(inode->i_gid)); + cpu_to_le16(high_16_bits(gid)); } else { raw_inode->i_uid_high = 0; raw_inode->i_gid_high = 0; } } else { raw_inode->i_uid_low = - cpu_to_le16(fs_high2lowuid(inode->i_uid)); + cpu_to_le16(fs_high2lowuid(uid)); raw_inode->i_gid_low = - cpu_to_le16(fs_high2lowgid(inode->i_gid)); + cpu_to_le16(fs_high2lowgid(gid)); raw_inode->i_uid_high = 0; raw_inode->i_gid_high = 0; } +#ifdef CONFIG_TAGGING_INTERN + raw_inode->i_raw_tag = cpu_to_le16(inode->i_tag); +#endif raw_inode->i_links_count = cpu_to_le16(inode->i_nlink); EXT4_INODE_SET_XTIME(i_ctime, inode, raw_inode); @@ -5329,7 +5364,8 @@ return error; if ((ia_valid & ATTR_UID && attr->ia_uid != inode->i_uid) || - (ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid)) { + (ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid) || + (ia_valid & ATTR_TAG && attr->ia_tag != inode->i_tag)) { handle_t *handle; /* (user+group)*(old+new) structure, inode write (sb, @@ -5351,6 +5387,8 @@ inode->i_uid = attr->ia_uid; if (attr->ia_valid & ATTR_GID) inode->i_gid = attr->ia_gid; + if ((attr->ia_valid & ATTR_TAG) && IS_TAGGED(inode)) + inode->i_tag = attr->ia_tag; error = ext4_mark_inode_dirty(handle, inode); ext4_journal_stop(handle); } diff -Nur kernel-2.6.32.54/linux-2.6.32/fs/ext4/ioctl.c kernel-2.6.32.54.vs/linux-2.6.32/fs/ext4/ioctl.c --- kernel-2.6.32.54/linux-2.6.32/fs/ext4/ioctl.c 2012-01-16 15:01:39.532726779 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/fs/ext4/ioctl.c 2012-01-16 14:51:21.805409093 +0100 @@ -14,10 +14,39 @@ #include #include #include +#include #include #include "ext4_jbd2.h" #include "ext4.h" + +int ext4_sync_flags(struct inode *inode, int flags, int vflags) +{ + handle_t *handle = NULL; + struct ext4_iloc iloc; + int err; + + handle = ext4_journal_start(inode, 1); + if (IS_ERR(handle)) + return PTR_ERR(handle); + + if (IS_SYNC(inode)) + ext4_handle_sync(handle); + err = ext4_reserve_inode_write(handle, inode, &iloc); + if (err) + goto flags_err; + + inode->i_flags = flags; + inode->i_vflags = vflags; + ext4_get_inode_flags(EXT4_I(inode)); + inode->i_ctime = ext4_current_time(inode); + + err = ext4_mark_iloc_dirty(handle, inode, &iloc); +flags_err: + ext4_journal_stop(handle); + return err; +} + long ext4_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { struct inode *inode = filp->f_dentry->d_inode; @@ -50,6 +79,11 @@ flags = ext4_mask_flags(inode->i_mode, flags); + if (IS_BARRIER(inode)) { + vxwprintk_task(1, "messing with the barrier."); + return -EACCES; + } + err = -EPERM; mutex_lock(&inode->i_mutex); /* Is it quota file? Do not allow user to mess with it */ @@ -67,7 +101,9 @@ * * This test looks nicer. 
Thanks to Pauline Middelink */ - if ((flags ^ oldflags) & (EXT4_APPEND_FL | EXT4_IMMUTABLE_FL)) { + if ((oldflags & EXT4_IMMUTABLE_FL) || + ((flags ^ oldflags) & (EXT4_APPEND_FL | + EXT4_IMMUTABLE_FL | EXT4_IXUNLINK_FL))) { if (!capable(CAP_LINUX_IMMUTABLE)) goto flags_out; } diff -Nur kernel-2.6.32.54/linux-2.6.32/fs/ext4/namei.c kernel-2.6.32.54.vs/linux-2.6.32/fs/ext4/namei.c --- kernel-2.6.32.54/linux-2.6.32/fs/ext4/namei.c 2012-01-16 15:01:39.536726765 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/fs/ext4/namei.c 2012-01-16 14:51:21.813409065 +0100 @@ -34,6 +34,7 @@ #include #include #include +#include #include "ext4.h" #include "ext4_jbd2.h" @@ -941,6 +942,7 @@ if (bh) ll_rw_block(READ_META, 1, &bh); } + dx_propagate_tag(nd, inode); } if ((bh = bh_use[ra_ptr++]) == NULL) goto next; @@ -2543,6 +2545,7 @@ #endif .check_acl = ext4_check_acl, .fiemap = ext4_fiemap, + .sync_flags = ext4_sync_flags, }; const struct inode_operations ext4_special_inode_operations = { diff -Nur kernel-2.6.32.54/linux-2.6.32/fs/ext4/super.c kernel-2.6.32.54.vs/linux-2.6.32/fs/ext4/super.c --- kernel-2.6.32.54/linux-2.6.32/fs/ext4/super.c 2012-01-16 15:01:39.540726751 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/fs/ext4/super.c 2012-01-16 14:51:21.813409065 +0100 @@ -1100,6 +1100,7 @@ Opt_block_validity, Opt_noblock_validity, Opt_inode_readahead_blks, Opt_journal_ioprio, Opt_discard, Opt_nodiscard, + Opt_tag, Opt_notag, Opt_tagid }; static const match_table_t tokens = { @@ -1167,6 +1168,9 @@ {Opt_noauto_da_alloc, "noauto_da_alloc"}, {Opt_discard, "discard"}, {Opt_nodiscard, "nodiscard"}, + {Opt_tag, "tag"}, + {Opt_notag, "notag"}, + {Opt_tagid, "tagid=%u"}, {Opt_err, NULL}, }; @@ -1269,6 +1273,20 @@ case Opt_nouid32: set_opt(sbi->s_mount_opt, NO_UID32); break; +#ifndef CONFIG_TAGGING_NONE + case Opt_tag: + set_opt (sbi->s_mount_opt, TAGGED); + break; + case Opt_notag: + clear_opt (sbi->s_mount_opt, TAGGED); + break; +#endif +#ifdef CONFIG_PROPAGATE + case Opt_tagid: + /* use args[0] */ + set_opt (sbi->s_mount_opt, TAGGED); + break; +#endif case Opt_debug: set_opt(sbi->s_mount_opt, DEBUG); break; @@ -2471,6 +2489,9 @@ &journal_ioprio, NULL, 0)) goto failed_mount; + if (EXT4_SB(sb)->s_mount_opt & EXT4_MOUNT_TAGGED) + sb->s_flags |= MS_TAGGED; + sb->s_flags = (sb->s_flags & ~MS_POSIXACL) | ((sbi->s_mount_opt & EXT4_MOUNT_POSIX_ACL) ? MS_POSIXACL : 0); @@ -3522,6 +3543,14 @@ if (sbi->s_mount_flags & EXT4_MF_FS_ABORTED) ext4_abort(sb, __func__, "Abort forced by user"); + if ((sbi->s_mount_opt & EXT4_MOUNT_TAGGED) && + !(sb->s_flags & MS_TAGGED)) { + printk("EXT4-fs: %s: tagging not permitted on remount.\n", + sb->s_id); + err = -EINVAL; + goto restore_opts; + } + sb->s_flags = (sb->s_flags & ~MS_POSIXACL) | ((sbi->s_mount_opt & EXT4_MOUNT_POSIX_ACL) ? 
MS_POSIXACL : 0); diff -Nur kernel-2.6.32.54/linux-2.6.32/fs/fcntl.c kernel-2.6.32.54.vs/linux-2.6.32/fs/fcntl.c --- kernel-2.6.32.54/linux-2.6.32/fs/fcntl.c 2012-01-16 15:01:39.540726751 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/fs/fcntl.c 2012-01-16 14:51:21.817409051 +0100 @@ -19,6 +19,7 @@ #include #include #include +#include #include #include @@ -102,6 +103,8 @@ if (tofree) filp_close(tofree, files); + else + vx_openfd_inc(newfd); /* fd was unused */ return newfd; @@ -426,6 +429,8 @@ filp = fget(fd); if (!filp) goto out; + if (!vx_files_avail(1)) + goto out; err = security_file_fcntl(filp, cmd, arg); if (err) { diff -Nur kernel-2.6.32.54/linux-2.6.32/fs/file.c kernel-2.6.32.54.vs/linux-2.6.32/fs/file.c --- kernel-2.6.32.54/linux-2.6.32/fs/file.c 2009-12-03 04:51:21.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/fs/file.c 2012-01-16 14:51:21.817409051 +0100 @@ -20,6 +20,7 @@ #include #include #include +#include struct fdtable_defer { spinlock_t lock; @@ -368,6 +369,8 @@ struct file *f = *old_fds++; if (f) { get_file(f); + /* TODO: sum it first for check and performance */ + vx_openfd_inc(open_files - i); } else { /* * The fd may be claimed in the fd bitmap but not yet @@ -476,6 +479,7 @@ else FD_CLR(fd, fdt->close_on_exec); error = fd; + vx_openfd_inc(fd); #if 1 /* Sanity check */ if (rcu_dereference(fdt->fd[fd]) != NULL) { diff -Nur kernel-2.6.32.54/linux-2.6.32/fs/file_table.c kernel-2.6.32.54.vs/linux-2.6.32/fs/file_table.c --- kernel-2.6.32.54/linux-2.6.32/fs/file_table.c 2012-01-16 15:01:39.540726751 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/fs/file_table.c 2012-01-16 14:51:21.817409051 +0100 @@ -22,6 +22,8 @@ #include #include #include +#include +#include #include @@ -131,6 +133,8 @@ spin_lock_init(&f->f_lock); eventpoll_init_file(f); /* f->f_version: 0 */ + f->f_xid = vx_current_xid(); + vx_files_inc(f); return f; over: @@ -285,6 +289,8 @@ cdev_put(inode->i_cdev); fops_put(file->f_op); put_pid(file->f_owner.pid); + vx_files_dec(file); + file->f_xid = 0; file_kill(file); if (file->f_mode & FMODE_WRITE) drop_file_write_access(file); @@ -352,6 +358,8 @@ { if (atomic_long_dec_and_test(&file->f_count)) { security_file_free(file); + vx_files_dec(file); + file->f_xid = 0; file_kill(file); file_free(file); } diff -Nur kernel-2.6.32.54/linux-2.6.32/fs/fs_struct.c kernel-2.6.32.54.vs/linux-2.6.32/fs/fs_struct.c --- kernel-2.6.32.54/linux-2.6.32/fs/fs_struct.c 2009-12-03 04:51:21.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/fs/fs_struct.c 2012-01-16 14:51:21.821409037 +0100 @@ -4,6 +4,7 @@ #include #include #include +#include /* * Replace the fs->{rootmnt,root} with {mnt,dentry}. Put the old values. 
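
The fcntl.c, file.c and file_table.c hunks above charge every struct file and
every allocated fd slot to the context that created it (f_xid), and refuse new
files once the context is out of budget.  A minimal sketch of that accounting,
assuming a per-context structure with atomic counters and a soft limit; the
field names and the _sketch suffixes are illustrative, not the patch's actual
definitions:

#include <asm/atomic.h>

struct vx_files_sketch {
	atomic_t files;			/* live struct file objects */
	atomic_t openfd;		/* allocated fd slots */
	int files_max;			/* limit; -1 means unlimited */
};

/* vx_files_avail(n): may this context create n more files? */
static inline int vx_files_avail_sketch(struct vx_files_sketch *v, int n)
{
	return (v->files_max == -1) ||
	       (atomic_read(&v->files) + n <= v->files_max);
}

/* vx_files_inc()/vx_files_dec(): charge/release one struct file */
static inline void vx_files_inc_sketch(struct vx_files_sketch *v)
{
	atomic_inc(&v->files);
}

static inline void vx_files_dec_sketch(struct vx_files_sketch *v)
{
	atomic_dec(&v->files);
}
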
@@ -77,6 +78,7 @@ { path_put(&fs->root); path_put(&fs->pwd); + atomic_dec(&vs_global_fs); kmem_cache_free(fs_cachep, fs); } @@ -112,6 +114,7 @@ fs->pwd = old->pwd; path_get(&old->pwd); read_unlock(&old->lock); + atomic_inc(&vs_global_fs); } return fs; } diff -Nur kernel-2.6.32.54/linux-2.6.32/fs/gfs2/file.c kernel-2.6.32.54.vs/linux-2.6.32/fs/gfs2/file.c --- kernel-2.6.32.54/linux-2.6.32/fs/gfs2/file.c 2012-01-16 15:01:39.552726708 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/fs/gfs2/file.c 2012-01-16 14:51:21.821409037 +0100 @@ -132,6 +132,9 @@ [7] = GFS2_DIF_NOATIME, [12] = GFS2_DIF_EXHASH, [14] = GFS2_DIF_INHERIT_JDATA, + [27] = GFS2_DIF_IXUNLINK, + [26] = GFS2_DIF_BARRIER, + [29] = GFS2_DIF_COW, }; static const u32 gfs2_to_fsflags[32] = { @@ -141,6 +144,9 @@ [gfs2fl_NoAtime] = FS_NOATIME_FL, [gfs2fl_ExHash] = FS_INDEX_FL, [gfs2fl_InheritJdata] = FS_JOURNAL_DATA_FL, + [gfs2fl_IXUnlink] = FS_IXUNLINK_FL, + [gfs2fl_Barrier] = FS_BARRIER_FL, + [gfs2fl_Cow] = FS_COW_FL, }; static int gfs2_get_flags(struct file *filp, u32 __user *ptr) @@ -171,10 +177,16 @@ { struct gfs2_inode *ip = GFS2_I(inode); unsigned int flags = inode->i_flags; + unsigned int vflags = inode->i_vflags; + + flags &= ~(S_IMMUTABLE | S_IXUNLINK | + S_SYNC | S_APPEND | S_NOATIME | S_DIRSYNC); - flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC); if (ip->i_diskflags & GFS2_DIF_IMMUTABLE) flags |= S_IMMUTABLE; + if (ip->i_diskflags & GFS2_DIF_IXUNLINK) + flags |= S_IXUNLINK; + if (ip->i_diskflags & GFS2_DIF_APPENDONLY) flags |= S_APPEND; if (ip->i_diskflags & GFS2_DIF_NOATIME) @@ -182,6 +194,43 @@ if (ip->i_diskflags & GFS2_DIF_SYNC) flags |= S_SYNC; inode->i_flags = flags; + + vflags &= ~(V_BARRIER | V_COW); + + if (ip->i_diskflags & GFS2_DIF_BARRIER) + vflags |= V_BARRIER; + if (ip->i_diskflags & GFS2_DIF_COW) + vflags |= V_COW; + inode->i_vflags = vflags; +} + +void gfs2_get_inode_flags(struct inode *inode) +{ + struct gfs2_inode *ip = GFS2_I(inode); + unsigned int flags = inode->i_flags; + unsigned int vflags = inode->i_vflags; + + ip->i_diskflags &= ~(GFS2_DIF_APPENDONLY | + GFS2_DIF_NOATIME | GFS2_DIF_SYNC | + GFS2_DIF_IMMUTABLE | GFS2_DIF_IXUNLINK | + GFS2_DIF_BARRIER | GFS2_DIF_COW); + + if (flags & S_IMMUTABLE) + ip->i_diskflags |= GFS2_DIF_IMMUTABLE; + if (flags & S_IXUNLINK) + ip->i_diskflags |= GFS2_DIF_IXUNLINK; + + if (flags & S_APPEND) + ip->i_diskflags |= GFS2_DIF_APPENDONLY; + if (flags & S_NOATIME) + ip->i_diskflags |= GFS2_DIF_NOATIME; + if (flags & S_SYNC) + ip->i_diskflags |= GFS2_DIF_SYNC; + + if (vflags & V_BARRIER) + ip->i_diskflags |= GFS2_DIF_BARRIER; + if (vflags & V_COW) + ip->i_diskflags |= GFS2_DIF_COW; } /* Flags that can be set by user space */ @@ -293,6 +342,37 @@ return do_gfs2_set_flags(filp, gfsflags, ~GFS2_DIF_JDATA); } +int gfs2_sync_flags(struct inode *inode, int flags, int vflags) +{ + struct gfs2_inode *ip = GFS2_I(inode); + struct gfs2_sbd *sdp = GFS2_SB(inode); + struct buffer_head *bh; + struct gfs2_holder gh; + int error; + + error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh); + if (error) + return error; + error = gfs2_trans_begin(sdp, RES_DINODE, 0); + if (error) + goto out; + error = gfs2_meta_inode_buffer(ip, &bh); + if (error) + goto out_trans_end; + gfs2_trans_add_bh(ip->i_gl, bh, 1); + inode->i_flags = flags; + inode->i_vflags = vflags; + gfs2_get_inode_flags(inode); + gfs2_dinode_out(ip, bh->b_data); + brelse(bh); + gfs2_set_aops(inode); +out_trans_end: + gfs2_trans_end(sdp); +out: + gfs2_glock_dq_uninit(&gh); + return error; +} + static long 
gfs2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { switch(cmd) { diff -Nur kernel-2.6.32.54/linux-2.6.32/fs/gfs2/inode.h kernel-2.6.32.54.vs/linux-2.6.32/fs/gfs2/inode.h --- kernel-2.6.32.54/linux-2.6.32/fs/gfs2/inode.h 2009-12-03 04:51:21.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/fs/gfs2/inode.h 2012-01-16 14:51:21.821409037 +0100 @@ -109,6 +109,7 @@ extern const struct file_operations gfs2_dir_fops_nolock; extern void gfs2_set_inode_flags(struct inode *inode); +extern int gfs2_sync_flags(struct inode *inode, int flags, int vflags); #ifdef CONFIG_GFS2_FS_LOCKING_DLM extern const struct file_operations gfs2_file_fops; diff -Nur kernel-2.6.32.54/linux-2.6.32/fs/gfs2/ops_inode.c kernel-2.6.32.54.vs/linux-2.6.32/fs/gfs2/ops_inode.c --- kernel-2.6.32.54/linux-2.6.32/fs/gfs2/ops_inode.c 2009-12-03 04:51:21.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/fs/gfs2/ops_inode.c 2012-01-16 14:51:21.825409023 +0100 @@ -1400,6 +1400,7 @@ .listxattr = gfs2_listxattr, .removexattr = gfs2_removexattr, .fiemap = gfs2_fiemap, + .sync_flags = gfs2_sync_flags, }; const struct inode_operations gfs2_dir_iops = { @@ -1420,6 +1421,7 @@ .listxattr = gfs2_listxattr, .removexattr = gfs2_removexattr, .fiemap = gfs2_fiemap, + .sync_flags = gfs2_sync_flags, }; const struct inode_operations gfs2_symlink_iops = { diff -Nur kernel-2.6.32.54/linux-2.6.32/fs/hfsplus/ioctl.c kernel-2.6.32.54.vs/linux-2.6.32/fs/hfsplus/ioctl.c --- kernel-2.6.32.54/linux-2.6.32/fs/hfsplus/ioctl.c 2009-12-03 04:51:21.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/fs/hfsplus/ioctl.c 2012-01-16 14:51:21.825409023 +0100 @@ -17,6 +17,7 @@ #include #include #include +#include #include #include "hfsplus_fs.h" diff -Nur kernel-2.6.32.54/linux-2.6.32/fs/inode.c kernel-2.6.32.54.vs/linux-2.6.32/fs/inode.c --- kernel-2.6.32.54/linux-2.6.32/fs/inode.c 2012-01-16 15:01:39.560726680 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/fs/inode.c 2012-01-16 14:51:21.825409023 +0100 @@ -133,6 +133,9 @@ struct address_space *const mapping = &inode->i_data; inode->i_sb = sb; + + /* essential because of inode slab reuse */ + inode->i_tag = 0; inode->i_blkbits = sb->s_blocksize_bits; inode->i_flags = 0; atomic_set(&inode->i_count, 1); @@ -153,6 +156,7 @@ inode->i_bdev = NULL; inode->i_cdev = NULL; inode->i_rdev = 0; + inode->i_mdev = 0; inode->dirtied_when = 0; if (security_inode_alloc(inode)) @@ -315,6 +319,8 @@ inodes_stat.nr_unused--; } +EXPORT_SYMBOL_GPL(__iget); + /** * clear_inode - clear an inode * @inode: inode to clear @@ -1619,9 +1625,11 @@ if (S_ISCHR(mode)) { inode->i_fop = &def_chr_fops; inode->i_rdev = rdev; + inode->i_mdev = rdev; } else if (S_ISBLK(mode)) { inode->i_fop = &def_blk_fops; inode->i_rdev = rdev; + inode->i_mdev = rdev; } else if (S_ISFIFO(mode)) inode->i_fop = &def_fifo_fops; else if (S_ISSOCK(mode)) diff -Nur kernel-2.6.32.54/linux-2.6.32/fs/ioctl.c kernel-2.6.32.54.vs/linux-2.6.32/fs/ioctl.c --- kernel-2.6.32.54/linux-2.6.32/fs/ioctl.c 2009-12-03 04:51:21.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/fs/ioctl.c 2012-01-16 14:51:21.829409009 +0100 @@ -16,6 +16,9 @@ #include #include #include +#include +#include +#include #include diff -Nur kernel-2.6.32.54/linux-2.6.32/fs/ioprio.c kernel-2.6.32.54.vs/linux-2.6.32/fs/ioprio.c --- kernel-2.6.32.54/linux-2.6.32/fs/ioprio.c 2009-12-03 04:51:21.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/fs/ioprio.c 2012-01-16 14:51:21.833408995 +0100 @@ -26,6 +26,7 @@ #include #include #include +#include int set_task_ioprio(struct task_struct *task, int 
ioprio) { @@ -123,6 +124,8 @@ else pgrp = find_vpid(who); do_each_pid_thread(pgrp, PIDTYPE_PGID, p) { + if (!vx_check(p->xid, VS_ADMIN_P | VS_IDENT)) + continue; ret = set_task_ioprio(p, ioprio); if (ret) break; @@ -212,6 +215,8 @@ else pgrp = find_vpid(who); do_each_pid_thread(pgrp, PIDTYPE_PGID, p) { + if (!vx_check(p->xid, VS_ADMIN_P | VS_IDENT)) + continue; tmpio = get_task_ioprio(p); if (tmpio < 0) continue; diff -Nur kernel-2.6.32.54/linux-2.6.32/fs/jfs/acl.c kernel-2.6.32.54.vs/linux-2.6.32/fs/jfs/acl.c --- kernel-2.6.32.54/linux-2.6.32/fs/jfs/acl.c 2009-12-03 04:51:21.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/fs/jfs/acl.c 2012-01-16 14:51:21.833408995 +0100 @@ -216,7 +216,8 @@ return rc; if ((iattr->ia_valid & ATTR_UID && iattr->ia_uid != inode->i_uid) || - (iattr->ia_valid & ATTR_GID && iattr->ia_gid != inode->i_gid)) { + (iattr->ia_valid & ATTR_GID && iattr->ia_gid != inode->i_gid) || + (iattr->ia_valid & ATTR_TAG && iattr->ia_tag != inode->i_tag)) { if (vfs_dq_transfer(inode, iattr)) return -EDQUOT; } diff -Nur kernel-2.6.32.54/linux-2.6.32/fs/jfs/file.c kernel-2.6.32.54.vs/linux-2.6.32/fs/jfs/file.c --- kernel-2.6.32.54/linux-2.6.32/fs/jfs/file.c 2009-12-03 04:51:21.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/fs/jfs/file.c 2012-01-16 14:51:21.833408995 +0100 @@ -98,6 +98,7 @@ .setattr = jfs_setattr, .check_acl = jfs_check_acl, #endif + .sync_flags = jfs_sync_flags, }; const struct file_operations jfs_file_operations = { diff -Nur kernel-2.6.32.54/linux-2.6.32/fs/jfs/ioctl.c kernel-2.6.32.54.vs/linux-2.6.32/fs/jfs/ioctl.c --- kernel-2.6.32.54/linux-2.6.32/fs/jfs/ioctl.c 2009-12-03 04:51:21.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/fs/jfs/ioctl.c 2012-01-16 14:51:21.837408981 +0100 @@ -11,6 +11,7 @@ #include #include #include +#include #include #include @@ -52,6 +53,16 @@ } +int jfs_sync_flags(struct inode *inode, int flags, int vflags) +{ + inode->i_flags = flags; + inode->i_vflags = vflags; + jfs_get_inode_flags(JFS_IP(inode)); + inode->i_ctime = CURRENT_TIME_SEC; + mark_inode_dirty(inode); + return 0; +} + long jfs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { struct inode *inode = filp->f_dentry->d_inode; @@ -85,6 +96,11 @@ if (!S_ISDIR(inode->i_mode)) flags &= ~JFS_DIRSYNC_FL; + if (IS_BARRIER(inode)) { + vxwprintk_task(1, "messing with the barrier."); + return -EACCES; + } + /* Is it quota file? Do not allow user to mess with it */ if (IS_NOQUOTA(inode)) { err = -EPERM; @@ -102,8 +118,8 @@ * the relevant capability. 
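
The ext2, ext3, ext4 and jfs ioctl hunks all widen the classic immutable test
the same way: any transition of the APPEND, IMMUTABLE or new IXUNLINK bit, and
any flag change at all while the inode is currently immutable, requires
CAP_LINUX_IMMUTABLE.  Condensed into a single predicate using the generic
FS_*_FL names (the helper itself is a sketch, not patch code):

/* nonzero when the requested flag change needs CAP_LINUX_IMMUTABLE */
static int needs_immutable_cap(unsigned int oldflags, unsigned int flags)
{
	const unsigned int guarded =
		FS_APPEND_FL | FS_IMMUTABLE_FL | FS_IXUNLINK_FL;

	if (oldflags & FS_IMMUTABLE_FL)		/* already immutable */
		return 1;
	return (flags ^ oldflags) & guarded;	/* a guarded bit toggles */
}
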
*/ if ((oldflags & JFS_IMMUTABLE_FL) || - ((flags ^ oldflags) & - (JFS_APPEND_FL | JFS_IMMUTABLE_FL))) { + ((flags ^ oldflags) & (JFS_APPEND_FL | + JFS_IMMUTABLE_FL | JFS_IXUNLINK_FL))) { if (!capable(CAP_LINUX_IMMUTABLE)) { mutex_unlock(&inode->i_mutex); err = -EPERM; @@ -111,7 +127,7 @@ } } - flags = flags & JFS_FL_USER_MODIFIABLE; + flags &= JFS_FL_USER_MODIFIABLE; flags |= oldflags & ~JFS_FL_USER_MODIFIABLE; jfs_inode->mode2 = flags; diff -Nur kernel-2.6.32.54/linux-2.6.32/fs/jfs/jfs_dinode.h kernel-2.6.32.54.vs/linux-2.6.32/fs/jfs/jfs_dinode.h --- kernel-2.6.32.54/linux-2.6.32/fs/jfs/jfs_dinode.h 2009-12-03 04:51:21.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/fs/jfs/jfs_dinode.h 2012-01-16 14:51:21.837408981 +0100 @@ -161,9 +161,13 @@ #define JFS_APPEND_FL 0x01000000 /* writes to file may only append */ #define JFS_IMMUTABLE_FL 0x02000000 /* Immutable file */ +#define JFS_IXUNLINK_FL 0x08000000 /* Immutable invert on unlink */ -#define JFS_FL_USER_VISIBLE 0x03F80000 -#define JFS_FL_USER_MODIFIABLE 0x03F80000 +#define JFS_BARRIER_FL 0x04000000 /* Barrier for chroot() */ +#define JFS_COW_FL 0x20000000 /* Copy on Write marker */ + +#define JFS_FL_USER_VISIBLE 0x07F80000 +#define JFS_FL_USER_MODIFIABLE 0x07F80000 #define JFS_FL_INHERIT 0x03C80000 /* These are identical to EXT[23]_IOC_GETFLAGS/SETFLAGS */ diff -Nur kernel-2.6.32.54/linux-2.6.32/fs/jfs/jfs_filsys.h kernel-2.6.32.54.vs/linux-2.6.32/fs/jfs/jfs_filsys.h --- kernel-2.6.32.54/linux-2.6.32/fs/jfs/jfs_filsys.h 2009-12-03 04:51:21.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/fs/jfs/jfs_filsys.h 2012-01-16 14:51:21.837408981 +0100 @@ -263,6 +263,7 @@ #define JFS_NAME_MAX 255 #define JFS_PATH_MAX BPSIZE +#define JFS_TAGGED 0x00800000 /* Context Tagging */ /* * file system state (superblock state) diff -Nur kernel-2.6.32.54/linux-2.6.32/fs/jfs/jfs_imap.c kernel-2.6.32.54.vs/linux-2.6.32/fs/jfs/jfs_imap.c --- kernel-2.6.32.54/linux-2.6.32/fs/jfs/jfs_imap.c 2009-12-03 04:51:21.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/fs/jfs/jfs_imap.c 2012-01-16 14:51:21.841408967 +0100 @@ -45,6 +45,7 @@ #include #include #include +#include #include "jfs_incore.h" #include "jfs_inode.h" @@ -3059,6 +3060,8 @@ { struct jfs_inode_info *jfs_ip = JFS_IP(ip); struct jfs_sb_info *sbi = JFS_SBI(ip->i_sb); + uid_t uid; + gid_t gid; jfs_ip->fileset = le32_to_cpu(dip->di_fileset); jfs_ip->mode2 = le32_to_cpu(dip->di_mode); @@ -3079,14 +3082,18 @@ } ip->i_nlink = le32_to_cpu(dip->di_nlink); - jfs_ip->saved_uid = le32_to_cpu(dip->di_uid); + uid = le32_to_cpu(dip->di_uid); + gid = le32_to_cpu(dip->di_gid); + ip->i_tag = INOTAG_TAG(DX_TAG(ip), uid, gid, 0); + + jfs_ip->saved_uid = INOTAG_UID(DX_TAG(ip), uid, gid); if (sbi->uid == -1) ip->i_uid = jfs_ip->saved_uid; else { ip->i_uid = sbi->uid; } - jfs_ip->saved_gid = le32_to_cpu(dip->di_gid); + jfs_ip->saved_gid = INOTAG_GID(DX_TAG(ip), uid, gid); if (sbi->gid == -1) ip->i_gid = jfs_ip->saved_gid; else { @@ -3151,14 +3158,12 @@ dip->di_size = cpu_to_le64(ip->i_size); dip->di_nblocks = cpu_to_le64(PBLK2LBLK(ip->i_sb, ip->i_blocks)); dip->di_nlink = cpu_to_le32(ip->i_nlink); - if (sbi->uid == -1) - dip->di_uid = cpu_to_le32(ip->i_uid); - else - dip->di_uid = cpu_to_le32(jfs_ip->saved_uid); - if (sbi->gid == -1) - dip->di_gid = cpu_to_le32(ip->i_gid); - else - dip->di_gid = cpu_to_le32(jfs_ip->saved_gid); + + dip->di_uid = cpu_to_le32(TAGINO_UID(DX_TAG(ip), + (sbi->uid == -1) ? ip->i_uid : jfs_ip->saved_uid, ip->i_tag)); + dip->di_gid = cpu_to_le32(TAGINO_GID(DX_TAG(ip), + (sbi->gid == -1) ? 
ip->i_gid : jfs_ip->saved_gid, ip->i_tag)); + jfs_get_inode_flags(jfs_ip); /* * mode2 is only needed for storing the higher order bits. diff -Nur kernel-2.6.32.54/linux-2.6.32/fs/jfs/jfs_inode.c kernel-2.6.32.54.vs/linux-2.6.32/fs/jfs/jfs_inode.c --- kernel-2.6.32.54/linux-2.6.32/fs/jfs/jfs_inode.c 2009-12-03 04:51:21.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/fs/jfs/jfs_inode.c 2012-01-16 14:51:21.841408967 +0100 @@ -18,6 +18,7 @@ #include #include +#include #include "jfs_incore.h" #include "jfs_inode.h" #include "jfs_filsys.h" @@ -30,29 +31,46 @@ { unsigned int flags = JFS_IP(inode)->mode2; - inode->i_flags &= ~(S_IMMUTABLE | S_APPEND | - S_NOATIME | S_DIRSYNC | S_SYNC); + inode->i_flags &= ~(S_IMMUTABLE | S_IXUNLINK | + S_SYNC | S_APPEND | S_NOATIME | S_DIRSYNC); if (flags & JFS_IMMUTABLE_FL) inode->i_flags |= S_IMMUTABLE; + if (flags & JFS_IXUNLINK_FL) + inode->i_flags |= S_IXUNLINK; + + if (flags & JFS_SYNC_FL) + inode->i_flags |= S_SYNC; if (flags & JFS_APPEND_FL) inode->i_flags |= S_APPEND; if (flags & JFS_NOATIME_FL) inode->i_flags |= S_NOATIME; if (flags & JFS_DIRSYNC_FL) inode->i_flags |= S_DIRSYNC; - if (flags & JFS_SYNC_FL) - inode->i_flags |= S_SYNC; + + inode->i_vflags &= ~(V_BARRIER | V_COW); + + if (flags & JFS_BARRIER_FL) + inode->i_vflags |= V_BARRIER; + if (flags & JFS_COW_FL) + inode->i_vflags |= V_COW; } void jfs_get_inode_flags(struct jfs_inode_info *jfs_ip) { unsigned int flags = jfs_ip->vfs_inode.i_flags; + unsigned int vflags = jfs_ip->vfs_inode.i_vflags; + + jfs_ip->mode2 &= ~(JFS_IMMUTABLE_FL | JFS_IXUNLINK_FL | + JFS_APPEND_FL | JFS_NOATIME_FL | + JFS_DIRSYNC_FL | JFS_SYNC_FL | + JFS_BARRIER_FL | JFS_COW_FL); - jfs_ip->mode2 &= ~(JFS_IMMUTABLE_FL | JFS_APPEND_FL | JFS_NOATIME_FL | - JFS_DIRSYNC_FL | JFS_SYNC_FL); if (flags & S_IMMUTABLE) jfs_ip->mode2 |= JFS_IMMUTABLE_FL; + if (flags & S_IXUNLINK) + jfs_ip->mode2 |= JFS_IXUNLINK_FL; + if (flags & S_APPEND) jfs_ip->mode2 |= JFS_APPEND_FL; if (flags & S_NOATIME) @@ -61,6 +79,11 @@ jfs_ip->mode2 |= JFS_DIRSYNC_FL; if (flags & S_SYNC) jfs_ip->mode2 |= JFS_SYNC_FL; + + if (vflags & V_BARRIER) + jfs_ip->mode2 |= JFS_BARRIER_FL; + if (vflags & V_COW) + jfs_ip->mode2 |= JFS_COW_FL; } /* @@ -105,6 +128,7 @@ mode |= S_ISGID; } else inode->i_gid = current_fsgid(); + inode->i_tag = dx_current_fstag(sb); /* * New inodes need to save sane values on disk when diff -Nur kernel-2.6.32.54/linux-2.6.32/fs/jfs/jfs_inode.h kernel-2.6.32.54.vs/linux-2.6.32/fs/jfs/jfs_inode.h --- kernel-2.6.32.54/linux-2.6.32/fs/jfs/jfs_inode.h 2009-12-03 04:51:21.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/fs/jfs/jfs_inode.h 2012-01-16 14:51:21.841408967 +0100 @@ -39,6 +39,7 @@ extern struct dentry *jfs_fh_to_parent(struct super_block *sb, struct fid *fid, int fh_len, int fh_type); extern void jfs_set_inode_flags(struct inode *); +extern int jfs_sync_flags(struct inode *, int, int); extern int jfs_get_block(struct inode *, sector_t, struct buffer_head *, int); extern const struct address_space_operations jfs_aops; diff -Nur kernel-2.6.32.54/linux-2.6.32/fs/jfs/namei.c kernel-2.6.32.54.vs/linux-2.6.32/fs/jfs/namei.c --- kernel-2.6.32.54/linux-2.6.32/fs/jfs/namei.c 2009-12-03 04:51:21.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/fs/jfs/namei.c 2012-01-16 14:51:21.845408953 +0100 @@ -21,6 +21,7 @@ #include #include #include +#include #include "jfs_incore.h" #include "jfs_superblock.h" #include "jfs_inode.h" @@ -1476,6 +1477,7 @@ return ERR_CAST(ip); } + dx_propagate_tag(nd, ip); dentry = d_splice_alias(ip, dentry); if (dentry 
 && (JFS_SBI(dip->i_sb)->mntflag & JFS_OS2))
@@ -1545,6 +1547,7 @@
 	.setattr	= jfs_setattr,
 	.check_acl	= jfs_check_acl,
 #endif
+	.sync_flags	= jfs_sync_flags,
 };
 
 const struct file_operations jfs_dir_operations = {
diff -Nur kernel-2.6.32.54/linux-2.6.32/fs/jfs/super.c kernel-2.6.32.54.vs/linux-2.6.32/fs/jfs/super.c
--- kernel-2.6.32.54/linux-2.6.32/fs/jfs/super.c	2009-12-03 04:51:21.000000000 +0100
+++ kernel-2.6.32.54.vs/linux-2.6.32/fs/jfs/super.c	2012-01-16 14:51:21.845408953 +0100
@@ -192,7 +192,8 @@
 enum {
 	Opt_integrity, Opt_nointegrity, Opt_iocharset, Opt_resize,
 	Opt_resize_nosize, Opt_errors, Opt_ignore, Opt_err, Opt_quota,
-	Opt_usrquota, Opt_grpquota, Opt_uid, Opt_gid, Opt_umask
+	Opt_usrquota, Opt_grpquota, Opt_uid, Opt_gid, Opt_umask,
+	Opt_tag, Opt_notag, Opt_tagid
 };
 
 static const match_table_t tokens = {
@@ -202,6 +203,10 @@
 	{Opt_resize, "resize=%u"},
 	{Opt_resize_nosize, "resize"},
 	{Opt_errors, "errors=%s"},
+	{Opt_tag, "tag"},
+	{Opt_notag, "notag"},
+	{Opt_tagid, "tagid=%u"},
+	{Opt_tag, "tagxid"},
 	{Opt_ignore, "noquota"},
 	{Opt_ignore, "quota"},
 	{Opt_usrquota, "usrquota"},
@@ -336,6 +341,20 @@
 			}
 			break;
 		}
+#ifndef CONFIG_TAGGING_NONE
+		case Opt_tag:
+			*flag |= JFS_TAGGED;
+			break;
+		case Opt_notag:
+			*flag &= ~JFS_TAGGED;
+			break;
+#endif
+#ifdef CONFIG_PROPAGATE
+		case Opt_tagid:
+			/* use args[0] */
+			*flag |= JFS_TAGGED;
+			break;
+#endif
 		default:
 			printk("jfs: Unrecognized mount option \"%s\" "
 			       " or missing value\n", p);
@@ -366,6 +385,12 @@
 	if (!parse_options(data, sb, &newLVSize, &flag)) {
 		return -EINVAL;
 	}
+	if ((flag & JFS_TAGGED) && !(sb->s_flags & MS_TAGGED)) {
+		printk(KERN_ERR "JFS: %s: tagging not permitted on remount.\n",
+			sb->s_id);
+		return -EINVAL;
+	}
+
 	lock_kernel();
 	if (newLVSize) {
 		if (sb->s_flags & MS_RDONLY) {
@@ -449,6 +474,9 @@
 #ifdef CONFIG_JFS_POSIX_ACL
 	sb->s_flags |= MS_POSIXACL;
 #endif
+	/* map mount option tagxid */
+	if (sbi->flag & JFS_TAGGED)
+		sb->s_flags |= MS_TAGGED;
 
 	if (newLVSize) {
 		printk(KERN_ERR "resize option for remount only\n");
diff -Nur kernel-2.6.32.54/linux-2.6.32/fs/libfs.c kernel-2.6.32.54.vs/linux-2.6.32/fs/libfs.c
--- kernel-2.6.32.54/linux-2.6.32/fs/libfs.c	2012-01-16 15:01:39.572726637 +0100
+++ kernel-2.6.32.54.vs/linux-2.6.32/fs/libfs.c	2012-01-16 14:51:21.845408953 +0100
@@ -127,7 +127,8 @@
  * both impossible due to the lock on directory.
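
The INOTAG_UID/GID/TAG and TAGINO_UID/GID pairs used by the ext3, ext4 and jfs
inode read and write paths split one on-disk id space into an in-core
(uid, gid, tag) triple and merge it back on writeout.  The real encoding
depends on the CONFIG_TAGGING_* scheme selected at build time; the sketch
below shows one plausible layout, purely for illustration, in which the tag
occupies the top 8 bits of a 32-bit uid:

#define SK_ID_BITS	24
#define SK_ID_MASK	((1U << SK_ID_BITS) - 1)
#define SK_TAG_MASK	0xff

/* split: the on-disk uid becomes an in-core uid plus context tag */
static inline uid_t sk_inotag_uid(uid_t duid)
{
	return duid & SK_ID_MASK;
}

static inline tag_t sk_inotag_tag(uid_t duid)
{
	return (duid >> SK_ID_BITS) & SK_TAG_MASK;
}

/* merge: in-core uid and tag become the on-disk uid again */
static inline uid_t sk_tagino_uid(uid_t uid, tag_t tag)
{
	return (uid & SK_ID_MASK) | ((tag & SK_TAG_MASK) << SK_ID_BITS);
}
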
*/ -int dcache_readdir(struct file * filp, void * dirent, filldir_t filldir) +static inline int do_dcache_readdir_filter(struct file *filp, + void *dirent, filldir_t filldir, int (*filter)(struct dentry *dentry)) { struct dentry *dentry = filp->f_path.dentry; struct dentry *cursor = filp->private_data; @@ -160,6 +161,8 @@ next = list_entry(p, struct dentry, d_u.d_child); if (d_unhashed(next) || !next->d_inode) continue; + if (filter && !filter(next)) + continue; spin_unlock(&dcache_lock); if (filldir(dirent, next->d_name.name, @@ -178,6 +181,18 @@ return 0; } +int dcache_readdir(struct file *filp, void *dirent, filldir_t filldir) +{ + return do_dcache_readdir_filter(filp, dirent, filldir, NULL); +} + +int dcache_readdir_filter(struct file *filp, void *dirent, filldir_t filldir, + int (*filter)(struct dentry *)) +{ + return do_dcache_readdir_filter(filp, dirent, filldir, filter); +} + + ssize_t generic_read_dir(struct file *filp, char __user *buf, size_t siz, loff_t *ppos) { return -EISDIR; @@ -842,6 +857,7 @@ EXPORT_SYMBOL(dcache_dir_lseek); EXPORT_SYMBOL(dcache_dir_open); EXPORT_SYMBOL(dcache_readdir); +EXPORT_SYMBOL(dcache_readdir_filter); EXPORT_SYMBOL(generic_read_dir); EXPORT_SYMBOL(get_sb_pseudo); EXPORT_SYMBOL(simple_write_begin); diff -Nur kernel-2.6.32.54/linux-2.6.32/fs/locks.c kernel-2.6.32.54.vs/linux-2.6.32/fs/locks.c --- kernel-2.6.32.54/linux-2.6.32/fs/locks.c 2009-12-03 04:51:21.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/fs/locks.c 2012-01-16 14:51:21.845408953 +0100 @@ -127,6 +127,8 @@ #include #include #include +#include +#include #include @@ -148,6 +150,8 @@ /* Allocate an empty lock structure. */ static struct file_lock *locks_alloc_lock(void) { + if (!vx_locks_avail(1)) + return NULL; return kmem_cache_alloc(filelock_cache, GFP_KERNEL); } @@ -174,6 +178,7 @@ BUG_ON(!list_empty(&fl->fl_block)); BUG_ON(!list_empty(&fl->fl_link)); + vx_locks_dec(fl); locks_release_private(fl); kmem_cache_free(filelock_cache, fl); } @@ -194,6 +199,7 @@ fl->fl_start = fl->fl_end = 0; fl->fl_ops = NULL; fl->fl_lmops = NULL; + fl->fl_xid = -1; } EXPORT_SYMBOL(locks_init_lock); @@ -248,6 +254,7 @@ new->fl_file = fl->fl_file; new->fl_ops = fl->fl_ops; new->fl_lmops = fl->fl_lmops; + new->fl_xid = fl->fl_xid; locks_copy_private(new, fl); } @@ -286,6 +293,11 @@ fl->fl_flags = FL_FLOCK; fl->fl_type = type; fl->fl_end = OFFSET_MAX; + + vxd_assert(filp->f_xid == vx_current_xid(), + "f_xid(%d) == current(%d)", filp->f_xid, vx_current_xid()); + fl->fl_xid = filp->f_xid; + vx_locks_inc(fl); *lock = fl; return 0; @@ -451,6 +463,7 @@ fl->fl_owner = current->files; fl->fl_pid = current->tgid; + fl->fl_xid = vx_current_xid(); fl->fl_file = filp; fl->fl_flags = FL_LEASE; @@ -470,6 +483,11 @@ if (fl == NULL) return ERR_PTR(error); + fl->fl_xid = vx_current_xid(); + if (filp) + vxd_assert(filp->f_xid == fl->fl_xid, + "f_xid(%d) == fl_xid(%d)", filp->f_xid, fl->fl_xid); + vx_locks_inc(fl); error = lease_init(filp, type, fl); if (error) { locks_free_lock(fl); @@ -770,6 +788,7 @@ if (found) cond_resched(); + new_fl->fl_xid = -1; find_conflict: for_each_lock(inode, before) { struct file_lock *fl = *before; @@ -790,6 +809,7 @@ goto out; locks_copy_lock(new_fl, request); locks_insert_lock(before, new_fl); + vx_locks_inc(new_fl); new_fl = NULL; error = 0; @@ -800,7 +820,8 @@ return error; } -static int __posix_lock_file(struct inode *inode, struct file_lock *request, struct file_lock *conflock) +static int __posix_lock_file(struct inode *inode, struct file_lock *request, + struct file_lock *conflock, xid_t 
xid) { struct file_lock *fl; struct file_lock *new_fl = NULL; @@ -810,6 +831,8 @@ struct file_lock **before; int error, added = 0; + vxd_assert(xid == vx_current_xid(), + "xid(%d) == current(%d)", xid, vx_current_xid()); /* * We may need two file_lock structures for this operation, * so we get them in advance to avoid races. @@ -820,7 +843,11 @@ (request->fl_type != F_UNLCK || request->fl_start != 0 || request->fl_end != OFFSET_MAX)) { new_fl = locks_alloc_lock(); + new_fl->fl_xid = xid; + vx_locks_inc(new_fl); new_fl2 = locks_alloc_lock(); + new_fl2->fl_xid = xid; + vx_locks_inc(new_fl2); } lock_kernel(); @@ -1019,7 +1046,8 @@ int posix_lock_file(struct file *filp, struct file_lock *fl, struct file_lock *conflock) { - return __posix_lock_file(filp->f_path.dentry->d_inode, fl, conflock); + return __posix_lock_file(filp->f_path.dentry->d_inode, + fl, conflock, filp->f_xid); } EXPORT_SYMBOL(posix_lock_file); @@ -1109,7 +1137,7 @@ fl.fl_end = offset + count - 1; for (;;) { - error = __posix_lock_file(inode, &fl, NULL); + error = __posix_lock_file(inode, &fl, NULL, filp->f_xid); if (error != FILE_LOCK_DEFERRED) break; error = wait_event_interruptible(fl.fl_wait, !fl.fl_next); @@ -1424,6 +1452,7 @@ locks_copy_lock(new_fl, lease); locks_insert_lock(before, new_fl); + vx_locks_inc(new_fl); *flp = new_fl; return 0; @@ -1779,6 +1808,11 @@ if (file_lock == NULL) return -ENOLCK; + vxd_assert(filp->f_xid == vx_current_xid(), + "f_xid(%d) == current(%d)", filp->f_xid, vx_current_xid()); + file_lock->fl_xid = filp->f_xid; + vx_locks_inc(file_lock); + /* * This might block, so we do it before checking the inode. */ @@ -1897,6 +1931,11 @@ if (file_lock == NULL) return -ENOLCK; + vxd_assert(filp->f_xid == vx_current_xid(), + "f_xid(%d) == current(%d)", filp->f_xid, vx_current_xid()); + file_lock->fl_xid = filp->f_xid; + vx_locks_inc(file_lock); + /* * This might block, so we do it before checking the inode. 
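
In fs/locks.c above, every file_lock is stamped with the xid of the file it
was taken on (fl_xid) and counted against that context between allocation and
locks_free_lock().  A guarded allocation sketch follows; the counter structure
and helper bodies are assumptions, and note the NULL check before tagging,
which a failed allocation makes necessary:

#include <asm/atomic.h>

struct vx_locks_sketch {
	atomic_t held;			/* locks charged to this context */
	int max;			/* limit; -1 means unlimited */
};

static struct file_lock *alloc_tagged_lock_sketch(struct vx_locks_sketch *v,
						  struct file *filp)
{
	struct file_lock *fl;

	if (v->max != -1 && atomic_read(&v->held) >= v->max)
		return NULL;		/* vx_locks_avail() says no */

	fl = locks_alloc_lock();	/* may return NULL itself */
	if (!fl)
		return NULL;

	fl->fl_xid = filp->f_xid;	/* tag with the owning context */
	atomic_inc(&v->held);		/* vx_locks_inc() */
	return fl;
}
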
*/ @@ -2162,8 +2201,11 @@ lock_get_status(f, fl, (long)f->private, ""); - list_for_each_entry(bfl, &fl->fl_block, fl_block) + list_for_each_entry(bfl, &fl->fl_block, fl_block) { + if (!vx_check(fl->fl_xid, VS_WATCH_P | VS_IDENT)) + continue; lock_get_status(f, bfl, (long)f->private, " ->"); + } f->private++; return 0; diff -Nur kernel-2.6.32.54/linux-2.6.32/fs/namei.c kernel-2.6.32.54.vs/linux-2.6.32/fs/namei.c --- kernel-2.6.32.54/linux-2.6.32/fs/namei.c 2012-01-16 15:01:40.272724159 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/fs/namei.c 2012-01-16 14:51:21.849408939 +0100 @@ -33,6 +33,14 @@ #include #include #include +#include +#include +#include +#include +#include +#include +#include +#include #include #define ACC_MODE(x) ("\000\004\002\006"[(x)&O_ACCMODE]) @@ -169,6 +177,77 @@ EXPORT_SYMBOL(putname); #endif +static inline int dx_barrier(const struct inode *inode) +{ + if (IS_BARRIER(inode) && !vx_check(0, VS_ADMIN | VS_WATCH)) { + vxwprintk_task(1, "did hit the barrier."); + return 1; + } + return 0; +} + +static int __dx_permission(const struct inode *inode, int mask) +{ + if (dx_barrier(inode)) + return -EACCES; + + if (inode->i_sb->s_magic == DEVPTS_SUPER_MAGIC) { + /* devpts is xid tagged */ + if (S_ISDIR(inode->i_mode) || + vx_check((xid_t)inode->i_tag, VS_IDENT | VS_WATCH_P)) + return 0; + } + else if (inode->i_sb->s_magic == PROC_SUPER_MAGIC) { + struct proc_dir_entry *de = PDE(inode); + + if (de && !vx_hide_check(0, de->vx_flags)) + goto out; + + if ((mask & (MAY_WRITE | MAY_APPEND))) { + struct pid *pid; + struct task_struct *tsk; + + if (vx_check(0, VS_ADMIN | VS_WATCH_P) || + vx_flags(VXF_STATE_SETUP, 0)) + return 0; + + pid = PROC_I(inode)->pid; + if (!pid) + goto out; + + tsk = pid_task(pid, PIDTYPE_PID); + vxdprintk(VXD_CBIT(tag, 0), "accessing %p[#%u]", + tsk, (tsk ? vx_task_xid(tsk) : 0)); + if (tsk && vx_check(vx_task_xid(tsk), VS_IDENT | VS_WATCH_P)) + return 0; + } + else { + /* FIXME: Should we block some entries here? */ + return 0; + } + } + else { + if (dx_notagcheck(inode->i_sb) || + dx_check(inode->i_tag, DX_HOSTID | DX_ADMIN | DX_WATCH | + DX_IDENT)) + return 0; + } + +out: + return -EACCES; +} + +int dx_permission(const struct inode *inode, int mask) +{ + int ret = __dx_permission(inode, mask); + if (unlikely(ret)) { + vxwprintk_task(1, "denied %x access to %s:%p[#%d,%lu]", + mask, inode->i_sb->s_id, inode, inode->i_tag, + inode->i_ino); + } + return ret; +} + /* * This does basic POSIX ACL permission checking */ @@ -269,10 +348,14 @@ /* * Nobody gets write access to an immutable file. 
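
For plain (non-devpts, non-proc) filesystems, __dx_permission() above boils
down to one question: is tag checking disabled on this superblock, is the
caller a privileged context, or does the inode's tag match the caller's?
A compressed sketch of that final branch; the helper body paraphrases the
dx_check() call and is not the patch's implementation:

/* returns 0 when access is allowed, matching the convention above */
static inline int tag_gate_sketch(struct super_block *sb,
				  tag_t itag, tag_t ctag, int privileged)
{
	if (dx_notagcheck(sb))		/* mounted with notagcheck */
		return 0;
	if (privileged)			/* host or admin context */
		return 0;
	return (itag == ctag) ? 0 : -EACCES;
}
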
*/ - if (IS_IMMUTABLE(inode)) + if (IS_IMMUTABLE(inode) && !IS_COW(inode)) return -EACCES; } + retval = dx_permission(inode, mask); + if (retval) + return retval; + if (inode->i_op->permission) retval = inode->i_op->permission(inode, mask); else @@ -448,6 +531,9 @@ { int ret; + if (dx_barrier(inode)) + return -EACCES; + if (inode->i_op->permission) { ret = inode->i_op->permission(inode, MAY_EXEC); if (!ret) @@ -763,7 +849,8 @@ if (nd->path.dentry == nd->root.dentry && nd->path.mnt == nd->root.mnt) { - break; + /* for sane '/' avoid follow_mount() */ + return; } spin_lock(&dcache_lock); if (nd->path.dentry != nd->path.mnt->mnt_root) { @@ -799,16 +886,30 @@ { struct vfsmount *mnt = nd->path.mnt; struct dentry *dentry = __d_lookup(nd->path.dentry, name); + struct inode *inode; if (!dentry) goto need_lookup; if (dentry->d_op && dentry->d_op->d_revalidate) goto need_revalidate; + inode = dentry->d_inode; + if (!inode) + goto done; + + if (__dx_permission(inode, MAY_ACCESS)) + goto hidden; + done: path->mnt = mnt; path->dentry = dentry; __follow_mount(path); return 0; +hidden: + vxwprintk_task(1, "did lookup hidden %s:%p[#%d,%lu] »%s/%.*s«.", + inode->i_sb->s_id, inode, inode->i_tag, inode->i_ino, + vxd_path(&nd->path), name->len, name->name); + dput(dentry); + return -ENOENT; need_lookup: dentry = real_lookup(nd->path.dentry, name, nd); @@ -1400,7 +1501,7 @@ if (IS_APPEND(dir)) return -EPERM; if (check_sticky(dir, victim->d_inode)||IS_APPEND(victim->d_inode)|| - IS_IMMUTABLE(victim->d_inode) || IS_SWAPFILE(victim->d_inode)) + IS_IXORUNLINK(victim->d_inode) || IS_SWAPFILE(victim->d_inode)) return -EPERM; if (isdir) { if (!S_ISDIR(victim->d_inode->i_mode)) @@ -1540,6 +1641,14 @@ break; } +#ifdef CONFIG_VSERVER_COWBL + if (IS_COW(inode) && (flag & FMODE_WRITE)) { + if (IS_COW_LINK(inode)) + return -EMLINK; + inode->i_flags &= ~(S_IXUNLINK | S_IMMUTABLE); + mark_inode_dirty(inode); + } +#endif error = inode_permission(inode, acc_mode); if (error) return error; @@ -1688,7 +1797,11 @@ int count = 0; int will_write; int flag = open_to_namei_flags(open_flag); - +#ifdef CONFIG_VSERVER_COWBL + int rflag = flag; + int rmode = mode; +restart: +#endif if (!acc_mode) acc_mode = MAY_OPEN | ACC_MODE(flag); @@ -1836,6 +1949,25 @@ goto exit; } error = may_open(&nd.path, acc_mode, flag); +#ifdef CONFIG_VSERVER_COWBL + if (error == -EMLINK) { + struct dentry *dentry; + dentry = cow_break_link(pathname); + if (IS_ERR(dentry)) { + error = PTR_ERR(dentry); + goto exit_cow; + } + dput(dentry); + if (will_write) + mnt_drop_write(nd.path.mnt); + release_open_intent(&nd); + path_put(&nd.path); + flag = rflag; + mode = rmode; + goto restart; + } +exit_cow: +#endif if (error) { if (will_write) mnt_drop_write(nd.path.mnt); @@ -1998,9 +2130,17 @@ if (error) return error; - if ((S_ISCHR(mode) || S_ISBLK(mode)) && !capable(CAP_MKNOD)) + if (!(S_ISCHR(mode) || S_ISBLK(mode))) + goto okay; + + if (!capable(CAP_MKNOD)) return -EPERM; + if (S_ISCHR(mode) && !vs_chrdev_perm(dev, DATTR_CREATE)) + return -EPERM; + if (S_ISBLK(mode) && !vs_blkdev_perm(dev, DATTR_CREATE)) + return -EPERM; +okay: if (!dir->i_op->mknod) return -EPERM; @@ -2469,7 +2609,7 @@ /* * A link to an append-only or immutable file cannot be created. 
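
may_open() above turns a write open of a copy-on-write marked inode into
either an in-place unshare (when this is the last link) or an -EMLINK that
makes do_filp_open() call cow_break_link() and restart the whole lookup.
That control flow, isolated; names ending in _sketch are stand-ins:

static int cow_open_sketch(const char *pathname, struct inode *inode)
{
	struct dentry *copy;

	if (!IS_COW(inode))
		return 0;

	if (!IS_COW_LINK(inode)) {
		/* last link: just drop the protection bits */
		inode->i_flags &= ~(S_IXUNLINK | S_IMMUTABLE);
		mark_inode_dirty(inode);
		return 0;
	}

	/* still shared: materialise a private copy, then retry */
	copy = cow_break_link(pathname);
	if (IS_ERR(copy))
		return PTR_ERR(copy);
	dput(copy);
	return -EAGAIN;			/* caller restarts the lookup */
}
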
*/ - if (IS_APPEND(inode) || IS_IMMUTABLE(inode)) + if (IS_APPEND(inode) || IS_IXORUNLINK(inode)) return -EPERM; if (!dir->i_op->link) return -EPERM; @@ -2842,6 +2982,219 @@ return __vfs_follow_link(nd, link); } + +#ifdef CONFIG_VSERVER_COWBL + +#include + +static inline +long do_cow_splice(struct file *in, struct file *out, size_t len) +{ + loff_t ppos = 0; + + return do_splice_direct(in, &ppos, out, len, 0); +} + +struct dentry *cow_break_link(const char *pathname) +{ + int ret, mode, pathlen, redo = 0; + struct nameidata old_nd, dir_nd; + struct path old_path, new_path; + struct dentry *dir, *res = NULL; + struct file *old_file; + struct file *new_file; + char *to, *path, pad='\251'; + loff_t size; + + vxdprintk(VXD_CBIT(misc, 1), "cow_break_link(»%s«)", pathname); + path = kmalloc(PATH_MAX, GFP_KERNEL); + ret = -ENOMEM; + if (!path) + goto out; + + /* old_nd will have refs to dentry and mnt */ + ret = path_lookup(pathname, LOOKUP_FOLLOW, &old_nd); + vxdprintk(VXD_CBIT(misc, 2), "path_lookup(old): %d", ret); + if (ret < 0) + goto out_free_path; + + old_path = old_nd.path; + mode = old_path.dentry->d_inode->i_mode; + + to = d_path(&old_path, path, PATH_MAX-2); + pathlen = strlen(to); + vxdprintk(VXD_CBIT(misc, 2), "old path »%s« [»%.*s«:%d]", to, + old_path.dentry->d_name.len, old_path.dentry->d_name.name, + old_path.dentry->d_name.len); + + to[pathlen + 1] = 0; +retry: + to[pathlen] = pad--; + ret = -EMLINK; + if (pad <= '\240') + goto out_rel_old; + + vxdprintk(VXD_CBIT(misc, 1), "temp copy »%s«", to); + /* dir_nd will have refs to dentry and mnt */ + ret = path_lookup(to, + LOOKUP_PARENT | LOOKUP_OPEN | LOOKUP_CREATE, &dir_nd); + vxdprintk(VXD_CBIT(misc, 2), + "path_lookup(new): %d", ret); + if (ret < 0) + goto retry; + + /* this puppy downs the inode mutex */ + new_path.dentry = lookup_create(&dir_nd, 0); + if (!new_path.dentry || IS_ERR(new_path.dentry)) { + vxdprintk(VXD_CBIT(misc, 2), + "lookup_create(new): %p", new_path.dentry); + mutex_unlock(&dir_nd.path.dentry->d_inode->i_mutex); + path_put(&dir_nd.path); + goto retry; + } + vxdprintk(VXD_CBIT(misc, 2), + "lookup_create(new): %p [»%.*s«:%d]", new_path.dentry, + new_path.dentry->d_name.len, new_path.dentry->d_name.name, + new_path.dentry->d_name.len); + dir = dir_nd.path.dentry; + + ret = vfs_create(dir_nd.path.dentry->d_inode, new_path.dentry, mode, &dir_nd); + vxdprintk(VXD_CBIT(misc, 2), + "vfs_create(new): %d", ret); + if (ret == -EEXIST) { + mutex_unlock(&dir->d_inode->i_mutex); + dput(new_path.dentry); + path_put(&dir_nd.path); + goto retry; + } + else if (ret < 0) + goto out_unlock_new; + + /* drop out early, ret passes ENOENT */ + ret = -ENOENT; + if ((redo = d_unhashed(old_path.dentry))) + goto out_unlock_new; + + new_path.mnt = dir_nd.path.mnt; + dget(old_path.dentry); + mntget(old_path.mnt); + /* this one cleans up the dentry/mnt in case of failure */ + old_file = dentry_open(old_path.dentry, old_path.mnt, + O_RDONLY, current_cred()); + vxdprintk(VXD_CBIT(misc, 2), + "dentry_open(old): %p", old_file); + if (!old_file || IS_ERR(old_file)) { + res = IS_ERR(old_file) ? (void *) old_file : res; + goto out_unlock_new; + } + + dget(new_path.dentry); + mntget(new_path.mnt); + /* this one cleans up the dentry/mnt in case of failure */ + new_file = dentry_open(new_path.dentry, new_path.mnt, + O_WRONLY, current_cred()); + vxdprintk(VXD_CBIT(misc, 2), + "dentry_open(new): %p", new_file); + + ret = IS_ERR(new_file) ? 
PTR_ERR(new_file) : -ENOENT;
+	if (!new_file || IS_ERR(new_file))
+		goto out_fput_old;
+
+	size = i_size_read(old_file->f_dentry->d_inode);
+	ret = do_cow_splice(old_file, new_file, size);
+	vxdprintk(VXD_CBIT(misc, 2), "do_splice_direct: %d", ret);
+	if (ret < 0) {
+		goto out_fput_both;
+	} else if (ret < size) {
+		ret = -ENOSPC;
+		goto out_fput_both;
+	} else {
+		struct inode *old_inode = old_path.dentry->d_inode;
+		struct inode *new_inode = new_path.dentry->d_inode;
+		struct iattr attr = {
+			.ia_uid = old_inode->i_uid,
+			.ia_gid = old_inode->i_gid,
+			.ia_valid = ATTR_UID | ATTR_GID
+		};
+
+		ret = inode_setattr(new_inode, &attr);
+		if (ret)
+			goto out_fput_both;
+	}
+
+	mutex_lock(&old_path.dentry->d_inode->i_sb->s_vfs_rename_mutex);
+
+	/* drop out late */
+	ret = -ENOENT;
+	if ((redo = d_unhashed(old_path.dentry)))
+		goto out_unlock;
+
+	vxdprintk(VXD_CBIT(misc, 2),
+		"vfs_rename: [»%*s«:%d] -> [»%*s«:%d]",
+		new_path.dentry->d_name.len, new_path.dentry->d_name.name,
+		new_path.dentry->d_name.len,
+		old_path.dentry->d_name.len, old_path.dentry->d_name.name,
+		old_path.dentry->d_name.len);
+	ret = vfs_rename(dir_nd.path.dentry->d_inode, new_path.dentry,
+		old_nd.path.dentry->d_parent->d_inode, old_path.dentry);
+	vxdprintk(VXD_CBIT(misc, 2), "vfs_rename: %d", ret);
+	res = new_path.dentry;
+
+out_unlock:
+	mutex_unlock(&old_path.dentry->d_inode->i_sb->s_vfs_rename_mutex);
+
+out_fput_both:
+	vxdprintk(VXD_CBIT(misc, 3),
+		"fput(new_file=%p[#%ld])", new_file,
+		atomic_long_read(&new_file->f_count));
+	fput(new_file);
+
+out_fput_old:
+	vxdprintk(VXD_CBIT(misc, 3),
+		"fput(old_file=%p[#%ld])", old_file,
+		atomic_long_read(&old_file->f_count));
+	fput(old_file);
+
+out_unlock_new:
+	mutex_unlock(&dir->d_inode->i_mutex);
+	if (!ret)
+		goto out_redo;
+
+	/* error path cleanup */
+	vfs_unlink(dir->d_inode, new_path.dentry);
+	dput(new_path.dentry);
+
+out_redo:
+	if (!redo)
+		goto out_rel_both;
+	/* lookup dentry once again */
+	path_put(&old_nd.path);
+	ret = path_lookup(pathname, LOOKUP_FOLLOW, &old_nd);
+	if (ret)
+		goto out_rel_both;
+
+	new_path.dentry = old_nd.path.dentry;
+	vxdprintk(VXD_CBIT(misc, 2),
+		"path_lookup(redo): %p [»%.*s«:%d]", new_path.dentry,
+		new_path.dentry->d_name.len, new_path.dentry->d_name.name,
+		new_path.dentry->d_name.len);
+	dget(new_path.dentry);
+	res = new_path.dentry;
+
+out_rel_both:
+	path_put(&dir_nd.path);
+out_rel_old:
+	path_put(&old_nd.path);
+out_free_path:
+	kfree(path);
+out:
+	if (ret)
+		res = ERR_PTR(ret);
+	return res;
+}
+
+#endif
+
 /* get the link contents into pagecache */
 static char *page_getlink(struct dentry * dentry, struct page **ppage)
 {
diff -Nur kernel-2.6.32.54/linux-2.6.32/fs/namespace.c kernel-2.6.32.54.vs/linux-2.6.32/fs/namespace.c
--- kernel-2.6.32.54/linux-2.6.32/fs/namespace.c	2012-01-16 15:01:39.576726623 +0100
+++ kernel-2.6.32.54.vs/linux-2.6.32/fs/namespace.c	2012-01-16 14:51:21.853408925 +0100
@@ -29,6 +29,11 @@
 #include
 #include
 #include
+#include
+#include
+#include
+#include
+#include
 #include
 #include
 #include "pnode.h"
@@ -567,6 +572,7 @@
 		mnt->mnt_root = dget(root);
mnt->mnt_mountpoint = mnt->mnt_root; mnt->mnt_parent = mnt; + mnt->mnt_tag = old->mnt_tag; if (flag & CL_SLAVE) { list_add(&mnt->mnt_slave, &old->mnt_slave_list); @@ -661,6 +667,31 @@ seq_escape(m, s, " \t\n\\"); } +static int mnt_is_reachable(struct vfsmount *mnt) +{ + struct path root; + struct dentry *point; + int ret; + + if (mnt == mnt->mnt_ns->root) + return 1; + + spin_lock(&vfsmount_lock); + root = current->fs->root; + point = root.dentry; + + while ((mnt != mnt->mnt_parent) && (mnt != root.mnt)) { + point = mnt->mnt_mountpoint; + mnt = mnt->mnt_parent; + } + + ret = (mnt == root.mnt) && is_subdir(point, root.dentry); + + spin_unlock(&vfsmount_lock); + + return ret; +} + /* * Simple .show_options callback for filesystems which don't want to * implement more complex mount option showing. @@ -748,6 +779,8 @@ { MS_SYNCHRONOUS, ",sync" }, { MS_DIRSYNC, ",dirsync" }, { MS_MANDLOCK, ",mand" }, + { MS_TAGGED, ",tag" }, + { MS_NOTAGCHECK, ",notagcheck" }, { 0, NULL } }; const struct proc_fs_info *fs_infop; @@ -795,10 +828,20 @@ int err = 0; struct path mnt_path = { .dentry = mnt->mnt_root, .mnt = mnt }; - mangle(m, mnt->mnt_devname ? mnt->mnt_devname : "none"); - seq_putc(m, ' '); - seq_path(m, &mnt_path, " \t\n\\"); - seq_putc(m, ' '); + if (vx_flags(VXF_HIDE_MOUNT, 0)) + return SEQ_SKIP; + if (!mnt_is_reachable(mnt) && !vx_check(0, VS_WATCH_P)) + return SEQ_SKIP; + + if (!vx_check(0, VS_ADMIN|VS_WATCH) && + mnt == current->fs->root.mnt) { + seq_puts(m, "/dev/root / "); + } else { + mangle(m, mnt->mnt_devname ? mnt->mnt_devname : "none"); + seq_putc(m, ' '); + seq_path(m, &mnt_path, " \t\n\\"); + seq_putc(m, ' '); + } show_type(m, mnt->mnt_sb); seq_puts(m, __mnt_is_readonly(mnt) ? " ro" : " rw"); err = show_sb_opts(m, mnt->mnt_sb); @@ -828,6 +871,11 @@ struct path root = p->root; int err = 0; + if (vx_flags(VXF_HIDE_MOUNT, 0)) + return SEQ_SKIP; + if (!mnt_is_reachable(mnt) && !vx_check(0, VS_WATCH_P)) + return SEQ_SKIP; + seq_printf(m, "%i %i %u:%u ", mnt->mnt_id, mnt->mnt_parent->mnt_id, MAJOR(sb->s_dev), MINOR(sb->s_dev)); seq_dentry(m, mnt->mnt_root, " \t\n\\"); @@ -886,17 +934,27 @@ struct path mnt_path = { .dentry = mnt->mnt_root, .mnt = mnt }; int err = 0; - /* device */ - if (mnt->mnt_devname) { - seq_puts(m, "device "); - mangle(m, mnt->mnt_devname); - } else - seq_puts(m, "no device"); - - /* mount point */ - seq_puts(m, " mounted on "); - seq_path(m, &mnt_path, " \t\n\\"); - seq_putc(m, ' '); + if (vx_flags(VXF_HIDE_MOUNT, 0)) + return SEQ_SKIP; + if (!mnt_is_reachable(mnt) && !vx_check(0, VS_WATCH_P)) + return SEQ_SKIP; + + if (!vx_check(0, VS_ADMIN|VS_WATCH) && + mnt == current->fs->root.mnt) { + seq_puts(m, "device /dev/root mounted on / "); + } else { + /* device */ + if (mnt->mnt_devname) { + seq_puts(m, "device "); + mangle(m, mnt->mnt_devname); + } else + seq_puts(m, "no device"); + + /* mount point */ + seq_puts(m, " mounted on "); + seq_path(m, &mnt_path, " \t\n\\"); + seq_putc(m, ' '); + } /* file system type */ seq_puts(m, "with fstype "); @@ -1137,7 +1195,7 @@ goto dput_and_out; retval = -EPERM; - if (!capable(CAP_SYS_ADMIN)) + if (!vx_capable(CAP_SYS_ADMIN, VXC_SECURE_MOUNT)) goto dput_and_out; retval = do_umount(path.mnt, flags); @@ -1163,7 +1221,7 @@ static int mount_is_safe(struct path *path) { - if (capable(CAP_SYS_ADMIN)) + if (vx_capable(CAP_SYS_ADMIN, VXC_SECURE_MOUNT)) return 0; return -EPERM; #ifdef notyet @@ -1427,7 +1485,7 @@ int type = flag & ~MS_REC; int err = 0; - if (!capable(CAP_SYS_ADMIN)) + if (!vx_capable(CAP_SYS_ADMIN, VXC_NAMESPACE)) return 
-EPERM; if (path->dentry != path->mnt->mnt_root) @@ -1454,11 +1512,13 @@ * do loopback mount. */ static int do_loopback(struct path *path, char *old_name, - int recurse) + tag_t tag, unsigned long flags, int mnt_flags) { struct path old_path; struct vfsmount *mnt = NULL; int err = mount_is_safe(path); + int recurse = flags & MS_REC; + if (err) return err; if (!old_name || !*old_name) @@ -1492,6 +1552,7 @@ spin_unlock(&vfsmount_lock); release_mounts(&umount_list); } + mnt->mnt_flags = mnt_flags; out: up_write(&namespace_sem); @@ -1522,12 +1583,12 @@ * on it - tough luck. */ static int do_remount(struct path *path, int flags, int mnt_flags, - void *data) + void *data, xid_t xid) { int err; struct super_block *sb = path->mnt->mnt_sb; - if (!capable(CAP_SYS_ADMIN)) + if (!vx_capable(CAP_SYS_ADMIN, VXC_SECURE_REMOUNT)) return -EPERM; if (!check_mnt(path->mnt)) @@ -1569,7 +1630,7 @@ struct path old_path, parent_path; struct vfsmount *p; int err = 0; - if (!capable(CAP_SYS_ADMIN)) + if (!vx_capable(CAP_SYS_ADMIN, VXC_SECURE_MOUNT)) return -EPERM; if (!old_name || !*old_name) return -EINVAL; @@ -1651,7 +1712,7 @@ return -EINVAL; /* we need capabilities... */ - if (!capable(CAP_SYS_ADMIN)) + if (!vx_capable(CAP_SYS_ADMIN, VXC_SECURE_MOUNT)) return -EPERM; lock_kernel(); @@ -1915,6 +1976,7 @@ struct path path; int retval = 0; int mnt_flags = 0; + tag_t tag = 0; /* Discard magic */ if ((flags & MS_MGC_MSK) == MS_MGC_VAL) @@ -1932,6 +1994,12 @@ if (!(flags & MS_NOATIME)) mnt_flags |= MNT_RELATIME; + if (dx_parse_tag(data_page, &tag, 1, &mnt_flags, &flags)) { + /* FIXME: bind and re-mounts get the tag flag? */ + if (flags & (MS_BIND|MS_REMOUNT)) + flags |= MS_TAGID; + } + /* Separate the per-mountpoint flags */ if (flags & MS_NOSUID) mnt_flags |= MNT_NOSUID; @@ -1948,6 +2016,8 @@ if (flags & MS_RDONLY) mnt_flags |= MNT_READONLY; + if (!capable(CAP_SYS_ADMIN)) + mnt_flags |= MNT_NODEV; flags &= ~(MS_NOSUID | MS_NOEXEC | MS_NODEV | MS_ACTIVE | MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT | MS_STRICTATIME); @@ -1964,9 +2034,9 @@ if (flags & MS_REMOUNT) retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags, - data_page); + data_page, tag); else if (flags & MS_BIND) - retval = do_loopback(&path, dev_name, flags & MS_REC); + retval = do_loopback(&path, dev_name, tag, flags, mnt_flags); else if (flags & (MS_SHARED | MS_PRIVATE | MS_SLAVE | MS_UNBINDABLE)) retval = do_change_type(&path, flags); else if (flags & MS_MOVE) @@ -2045,6 +2115,7 @@ q = next_mnt(q, new_ns->root); } up_write(&namespace_sem); + atomic_inc(&vs_global_mnt_ns); if (rootmnt) mntput(rootmnt); @@ -2189,9 +2260,10 @@ down_write(&namespace_sem); mutex_lock(&old.dentry->d_inode->i_mutex); error = -EINVAL; - if (IS_MNT_SHARED(old.mnt) || + if ((IS_MNT_SHARED(old.mnt) || IS_MNT_SHARED(new.mnt->mnt_parent) || - IS_MNT_SHARED(root.mnt->mnt_parent)) + IS_MNT_SHARED(root.mnt->mnt_parent)) && + !vx_flags(VXF_STATE_SETUP, 0)) goto out2; if (!check_mnt(root.mnt)) goto out2; @@ -2327,6 +2399,7 @@ spin_unlock(&vfsmount_lock); up_write(&namespace_sem); release_mounts(&umount_list); + atomic_dec(&vs_global_mnt_ns); kfree(ns); } EXPORT_SYMBOL(put_mnt_ns); diff -Nur kernel-2.6.32.54/linux-2.6.32/fs/nfs/client.c kernel-2.6.32.54.vs/linux-2.6.32/fs/nfs/client.c --- kernel-2.6.32.54/linux-2.6.32/fs/nfs/client.c 2012-01-16 15:01:39.576726623 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/fs/nfs/client.c 2012-01-16 14:51:21.853408925 +0100 @@ -738,6 +738,9 @@ if (server->flags & NFS_MOUNT_SOFT) server->client->cl_softrtry = 1; + server->client->cl_tag = 
0; + if (server->flags & NFS_MOUNT_TAGGED) + server->client->cl_tag = 1; return 0; } @@ -909,6 +912,10 @@ server->acdirmin = server->acdirmax = 0; } + /* FIXME: needs fsinfo + if (server->flags & NFS_MOUNT_TAGGED) + sb->s_flags |= MS_TAGGED; */ + server->maxfilesize = fsinfo->maxfilesize; /* We're airborne Set socket buffersize */ diff -Nur kernel-2.6.32.54/linux-2.6.32/fs/nfs/dir.c kernel-2.6.32.54.vs/linux-2.6.32/fs/nfs/dir.c --- kernel-2.6.32.54/linux-2.6.32/fs/nfs/dir.c 2012-01-16 15:01:39.576726623 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/fs/nfs/dir.c 2012-01-16 14:51:21.853408925 +0100 @@ -33,6 +33,7 @@ #include #include #include +#include #include "nfs4_fs.h" #include "delegation.h" @@ -951,6 +952,7 @@ if (IS_ERR(res)) goto out_unblock_sillyrename; + dx_propagate_tag(nd, inode); no_entry: res = d_materialise_unique(dentry, inode); if (res != NULL) { diff -Nur kernel-2.6.32.54/linux-2.6.32/fs/nfs/inode.c kernel-2.6.32.54.vs/linux-2.6.32/fs/nfs/inode.c --- kernel-2.6.32.54/linux-2.6.32/fs/nfs/inode.c 2012-01-16 15:01:39.580726609 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/fs/nfs/inode.c 2012-01-16 14:51:21.853408925 +0100 @@ -36,6 +36,7 @@ #include #include #include +#include #include #include @@ -279,6 +280,8 @@ if (inode->i_state & I_NEW) { struct nfs_inode *nfsi = NFS_I(inode); unsigned long now = jiffies; + uid_t uid; + gid_t gid; /* We set i_ino for the few things that still rely on it, * such as stat(2) */ @@ -327,8 +330,8 @@ nfsi->change_attr = 0; inode->i_size = 0; inode->i_nlink = 0; - inode->i_uid = -2; - inode->i_gid = -2; + uid = -2; + gid = -2; inode->i_blocks = 0; memset(nfsi->cookieverf, 0, sizeof(nfsi->cookieverf)); @@ -365,17 +368,25 @@ else if (nfs_server_capable(inode, NFS_CAP_NLINK)) nfsi->cache_validity |= NFS_INO_INVALID_ATTR; if (fattr->valid & NFS_ATTR_FATTR_OWNER) - inode->i_uid = fattr->uid; - else if (nfs_server_capable(inode, NFS_CAP_OWNER)) - nfsi->cache_validity |= NFS_INO_INVALID_ATTR - | NFS_INO_INVALID_ACCESS - | NFS_INO_INVALID_ACL; + uid = fattr->uid; + else { + uid = TAGINO_UID(DX_TAG(inode), + inode->i_uid, inode->i_tag); + if (nfs_server_capable(inode, NFS_CAP_OWNER)) + nfsi->cache_validity |= NFS_INO_INVALID_ATTR + | NFS_INO_INVALID_ACCESS + | NFS_INO_INVALID_ACL; + } if (fattr->valid & NFS_ATTR_FATTR_GROUP) - inode->i_gid = fattr->gid; - else if (nfs_server_capable(inode, NFS_CAP_OWNER_GROUP)) - nfsi->cache_validity |= NFS_INO_INVALID_ATTR - | NFS_INO_INVALID_ACCESS - | NFS_INO_INVALID_ACL; + gid = fattr->gid; + else { + gid = TAGINO_GID(DX_TAG(inode), + inode->i_gid, inode->i_tag); + if (nfs_server_capable(inode, NFS_CAP_OWNER_GROUP)) + nfsi->cache_validity |= NFS_INO_INVALID_ATTR + | NFS_INO_INVALID_ACCESS + | NFS_INO_INVALID_ACL; + } if (fattr->valid & NFS_ATTR_FATTR_BLOCKS_USED) inode->i_blocks = fattr->du.nfs2.blocks; if (fattr->valid & NFS_ATTR_FATTR_SPACE_USED) { @@ -384,6 +395,11 @@ */ inode->i_blocks = nfs_calc_block_size(fattr->du.nfs3.used); } + inode->i_uid = INOTAG_UID(DX_TAG(inode), uid, gid); + inode->i_gid = INOTAG_GID(DX_TAG(inode), uid, gid); + inode->i_tag = INOTAG_TAG(DX_TAG(inode), uid, gid, 0); + /* maybe fattr->xid someday */ + nfsi->attrtimeo = NFS_MINATTRTIMEO(inode); nfsi->attrtimeo_timestamp = now; nfsi->access_cache = RB_ROOT; @@ -393,11 +409,11 @@ unlock_new_inode(inode); } else nfs_refresh_inode(inode, fattr); + dprintk("NFS: nfs_fhget(%s/%Ld ct=%d)\n", inode->i_sb->s_id, (long long)NFS_FILEID(inode), atomic_read(&inode->i_count)); - out: return inode; @@ -496,6 +512,8 @@ inode->i_uid = attr->ia_uid; if 
((attr->ia_valid & ATTR_GID) != 0) inode->i_gid = attr->ia_gid; + if ((attr->ia_valid & ATTR_TAG) && IS_TAGGED(inode)) + inode->i_tag = attr->ia_tag; NFS_I(inode)->cache_validity |= NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL; spin_unlock(&inode->i_lock); } @@ -914,6 +932,9 @@ struct nfs_inode *nfsi = NFS_I(inode); loff_t cur_size, new_isize; unsigned long invalid = 0; + uid_t uid; + gid_t gid; + tag_t tag; /* Has the inode gone and changed behind our back? */ @@ -937,13 +958,18 @@ invalid |= NFS_INO_INVALID_ATTR|NFS_INO_REVAL_PAGECACHE; } + uid = INOTAG_UID(DX_TAG(inode), fattr->uid, fattr->gid); + gid = INOTAG_GID(DX_TAG(inode), fattr->uid, fattr->gid); + tag = INOTAG_TAG(DX_TAG(inode), fattr->uid, fattr->gid, 0); + /* Have any file permissions changed? */ if ((fattr->valid & NFS_ATTR_FATTR_MODE) && (inode->i_mode & S_IALLUGO) != (fattr->mode & S_IALLUGO)) invalid |= NFS_INO_INVALID_ATTR | NFS_INO_INVALID_ACCESS | NFS_INO_INVALID_ACL; - if ((fattr->valid & NFS_ATTR_FATTR_OWNER) && inode->i_uid != fattr->uid) + if ((fattr->valid & NFS_ATTR_FATTR_OWNER) && uid != fattr->uid) invalid |= NFS_INO_INVALID_ATTR | NFS_INO_INVALID_ACCESS | NFS_INO_INVALID_ACL; - if ((fattr->valid & NFS_ATTR_FATTR_GROUP) && inode->i_gid != fattr->gid) + if ((fattr->valid & NFS_ATTR_FATTR_GROUP) && gid != fattr->gid) invalid |= NFS_INO_INVALID_ATTR | NFS_INO_INVALID_ACCESS | NFS_INO_INVALID_ACL; + /* maybe check for tag too? */ /* Has the link count changed? */ if ((fattr->valid & NFS_ATTR_FATTR_NLINK) && inode->i_nlink != fattr->nlink) @@ -1158,6 +1184,9 @@ unsigned long invalid = 0; unsigned long now = jiffies; unsigned long save_cache_validity; + uid_t uid; + gid_t gid; + tag_t tag; dfprintk(VFS, "NFS: %s(%s/%ld ct=%d info=0x%x)\n", __func__, inode->i_sb->s_id, inode->i_ino, @@ -1260,6 +1289,9 @@ | NFS_INO_REVAL_PAGECACHE | NFS_INO_REVAL_FORCED); + uid = TAGINO_UID(DX_TAG(inode), inode->i_uid, inode->i_tag); + gid = TAGINO_GID(DX_TAG(inode), inode->i_gid, inode->i_tag); + tag = inode->i_tag; if (fattr->valid & NFS_ATTR_FATTR_ATIME) memcpy(&inode->i_atime, &fattr->atime, sizeof(inode->i_atime)); @@ -1279,9 +1311,9 @@ | NFS_INO_REVAL_FORCED); if (fattr->valid & NFS_ATTR_FATTR_OWNER) { - if (inode->i_uid != fattr->uid) { + if (uid != fattr->uid) { invalid |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL; - inode->i_uid = fattr->uid; + uid = fattr->uid; } } else if (server->caps & NFS_CAP_OWNER) invalid |= save_cache_validity & (NFS_INO_INVALID_ATTR @@ -1290,9 +1322,9 @@ | NFS_INO_REVAL_FORCED); if (fattr->valid & NFS_ATTR_FATTR_GROUP) { - if (inode->i_gid != fattr->gid) { + if (gid != fattr->gid) { invalid |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL; - inode->i_gid = fattr->gid; + gid = fattr->gid; } } else if (server->caps & NFS_CAP_OWNER_GROUP) invalid |= save_cache_validity & (NFS_INO_INVALID_ATTR @@ -1300,6 +1332,10 @@ | NFS_INO_INVALID_ACL | NFS_INO_REVAL_FORCED); + inode->i_uid = INOTAG_UID(DX_TAG(inode), uid, gid); + inode->i_gid = INOTAG_GID(DX_TAG(inode), uid, gid); + inode->i_tag = INOTAG_TAG(DX_TAG(inode), uid, gid, tag); + if (fattr->valid & NFS_ATTR_FATTR_NLINK) { if (inode->i_nlink != fattr->nlink) { invalid |= NFS_INO_INVALID_ATTR; diff -Nur kernel-2.6.32.54/linux-2.6.32/fs/nfs/nfs3xdr.c kernel-2.6.32.54.vs/linux-2.6.32/fs/nfs/nfs3xdr.c --- kernel-2.6.32.54/linux-2.6.32/fs/nfs/nfs3xdr.c 2009-12-03 04:51:21.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/fs/nfs/nfs3xdr.c 2012-01-16 14:51:21.861408897 +0100 @@ -21,6 +21,7 @@ #include #include #include 
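Throughout the NFS hunks above the same pattern repeats: attribute values arriving from the server are split into a plain uid/gid plus a context tag with the INOTAG_UID/INOTAG_GID/INOTAG_TAG helpers, and values heading back out are folded together again with TAGINO_UID/TAGINO_GID, with DX_TAG(inode) deciding whether the filesystem is tagged at all. The actual bit layout depends on which CONFIG_TAGGING_* scheme the kernel was built with; purely as a sketch of the idea, here is a model assuming a GID16-style layout where the tag occupies the upper 16 bits of the stored gid and the uid passes through unchanged:

#include <stdint.h>

/* assumed layout (illustration only): tag in gid bits 16..31 */
static inline uint32_t inotag_gid(int tagged, uint32_t raw_gid)
{
	/* kernel-visible gid: strip the tag bits when tagging is on */
	return tagged ? (raw_gid & 0xFFFFu) : raw_gid;
}

static inline uint32_t inotag_tag(int tagged, uint32_t raw_gid, uint32_t dflt)
{
	/* context tag recovered from the stored gid, or a default */
	return tagged ? (raw_gid >> 16) : dflt;
}

static inline uint32_t tagino_gid(int tagged, uint32_t gid, uint32_t tag)
{
	/* fold tag and gid back together for the wire or the disk */
	return tagged ? ((tag << 16) | (gid & 0xFFFFu)) : gid;
}

This is why nfs_update_inode() above first widens inode->i_uid/i_gid with TAGINO_* before comparing them against the server's fattr values, and re-narrows with INOTAG_* before storing: the comparison has to happen in the server's tagged representation, while the in-core inode keeps the stripped values plus i_tag.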
+#include #include "internal.h" #define NFSDBG_FACILITY NFSDBG_XDR @@ -176,7 +177,7 @@ } static inline __be32 * -xdr_encode_sattr(__be32 *p, struct iattr *attr) +xdr_encode_sattr(__be32 *p, struct iattr *attr, int tag) { if (attr->ia_valid & ATTR_MODE) { *p++ = xdr_one; @@ -184,15 +185,17 @@ } else { *p++ = xdr_zero; } - if (attr->ia_valid & ATTR_UID) { + if (attr->ia_valid & ATTR_UID || + (tag && (attr->ia_valid & ATTR_TAG))) { *p++ = xdr_one; - *p++ = htonl(attr->ia_uid); + *p++ = htonl(TAGINO_UID(tag, attr->ia_uid, attr->ia_tag)); } else { *p++ = xdr_zero; } - if (attr->ia_valid & ATTR_GID) { + if (attr->ia_valid & ATTR_GID || + (tag && (attr->ia_valid & ATTR_TAG))) { *p++ = xdr_one; - *p++ = htonl(attr->ia_gid); + *p++ = htonl(TAGINO_GID(tag, attr->ia_gid, attr->ia_tag)); } else { *p++ = xdr_zero; } @@ -279,7 +282,8 @@ nfs3_xdr_sattrargs(struct rpc_rqst *req, __be32 *p, struct nfs3_sattrargs *args) { p = xdr_encode_fhandle(p, args->fh); - p = xdr_encode_sattr(p, args->sattr); + p = xdr_encode_sattr(p, args->sattr, + req->rq_task->tk_client->cl_tag); *p++ = htonl(args->guard); if (args->guard) p = xdr_encode_time3(p, &args->guardtime); @@ -384,7 +388,8 @@ *p++ = args->verifier[0]; *p++ = args->verifier[1]; } else - p = xdr_encode_sattr(p, args->sattr); + p = xdr_encode_sattr(p, args->sattr, + req->rq_task->tk_client->cl_tag); req->rq_slen = xdr_adjust_iovec(req->rq_svec, p); return 0; @@ -398,7 +403,8 @@ { p = xdr_encode_fhandle(p, args->fh); p = xdr_encode_array(p, args->name, args->len); - p = xdr_encode_sattr(p, args->sattr); + p = xdr_encode_sattr(p, args->sattr, + req->rq_task->tk_client->cl_tag); req->rq_slen = xdr_adjust_iovec(req->rq_svec, p); return 0; } @@ -411,7 +417,8 @@ { p = xdr_encode_fhandle(p, args->fromfh); p = xdr_encode_array(p, args->fromname, args->fromlen); - p = xdr_encode_sattr(p, args->sattr); + p = xdr_encode_sattr(p, args->sattr, + req->rq_task->tk_client->cl_tag); *p++ = htonl(args->pathlen); req->rq_slen = xdr_adjust_iovec(req->rq_svec, p); @@ -429,7 +436,8 @@ p = xdr_encode_fhandle(p, args->fh); p = xdr_encode_array(p, args->name, args->len); *p++ = htonl(args->type); - p = xdr_encode_sattr(p, args->sattr); + p = xdr_encode_sattr(p, args->sattr, + req->rq_task->tk_client->cl_tag); if (args->type == NF3CHR || args->type == NF3BLK) { *p++ = htonl(MAJOR(args->rdev)); *p++ = htonl(MINOR(args->rdev)); diff -Nur kernel-2.6.32.54/linux-2.6.32/fs/nfs/nfsroot.c kernel-2.6.32.54.vs/linux-2.6.32/fs/nfs/nfsroot.c --- kernel-2.6.32.54/linux-2.6.32/fs/nfs/nfsroot.c 2009-12-03 04:51:21.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/fs/nfs/nfsroot.c 2012-01-16 14:51:21.861408897 +0100 @@ -122,12 +122,12 @@ enum { /* Options that take integer arguments */ Opt_port, Opt_rsize, Opt_wsize, Opt_timeo, Opt_retrans, Opt_acregmin, - Opt_acregmax, Opt_acdirmin, Opt_acdirmax, + Opt_acregmax, Opt_acdirmin, Opt_acdirmax, Opt_tagid, /* Options that take no arguments */ Opt_soft, Opt_hard, Opt_intr, Opt_nointr, Opt_posix, Opt_noposix, Opt_cto, Opt_nocto, Opt_ac, Opt_noac, Opt_lock, Opt_nolock, Opt_v2, Opt_v3, Opt_udp, Opt_tcp, - Opt_acl, Opt_noacl, + Opt_acl, Opt_noacl, Opt_tag, Opt_notag, /* Error token */ Opt_err }; @@ -164,6 +164,9 @@ {Opt_tcp, "tcp"}, {Opt_acl, "acl"}, {Opt_noacl, "noacl"}, + {Opt_tag, "tag"}, + {Opt_notag, "notag"}, + {Opt_tagid, "tagid=%u"}, {Opt_err, NULL} }; @@ -275,6 +278,20 @@ case Opt_noacl: nfs_data.flags |= NFS_MOUNT_NOACL; break; +#ifndef CONFIG_TAGGING_NONE + case Opt_tag: + nfs_data.flags |= NFS_MOUNT_TAGGED; + break; + case Opt_notag: + 
nfs_data.flags &= ~NFS_MOUNT_TAGGED; + break; +#endif +#ifdef CONFIG_PROPAGATE + case Opt_tagid: + /* use args[0] */ + nfs_data.flags |= NFS_MOUNT_TAGGED; + break; +#endif default: printk(KERN_WARNING "Root-NFS: unknown " "option: %s\n", p); diff -Nur kernel-2.6.32.54/linux-2.6.32/fs/nfs/super.c kernel-2.6.32.54.vs/linux-2.6.32/fs/nfs/super.c --- kernel-2.6.32.54/linux-2.6.32/fs/nfs/super.c 2012-01-16 15:01:39.588726581 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/fs/nfs/super.c 2012-01-16 14:51:21.861408897 +0100 @@ -53,6 +53,7 @@ #include #include #include +#include #include #include @@ -570,6 +571,7 @@ { NFS_MOUNT_NORDIRPLUS, ",nordirplus", "" }, { NFS_MOUNT_UNSHARED, ",nosharecache", "" }, { NFS_MOUNT_NORESVPORT, ",noresvport", "" }, + { NFS_MOUNT_TAGGED, ",tag", "" }, { 0, NULL, NULL } }; const struct proc_nfs_info *nfs_infop; diff -Nur kernel-2.6.32.54/linux-2.6.32/fs/nfsd/auth.c kernel-2.6.32.54.vs/linux-2.6.32/fs/nfsd/auth.c --- kernel-2.6.32.54/linux-2.6.32/fs/nfsd/auth.c 2009-12-03 04:51:21.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/fs/nfsd/auth.c 2012-01-16 14:51:21.861408897 +0100 @@ -10,6 +10,7 @@ #include #include #include +#include #include "auth.h" int nfsexp_flags(struct svc_rqst *rqstp, struct svc_export *exp) @@ -44,6 +45,9 @@ new->fsuid = rqstp->rq_cred.cr_uid; new->fsgid = rqstp->rq_cred.cr_gid; + /* FIXME: this desperately needs a tag :) + new->xid = (xid_t)INOTAG_TAG(DX_TAG_NFSD, cred.cr_uid, cred.cr_gid, 0); + */ rqgi = rqstp->rq_cred.cr_group_info; diff -Nur kernel-2.6.32.54/linux-2.6.32/fs/nfsd/nfs3xdr.c kernel-2.6.32.54.vs/linux-2.6.32/fs/nfsd/nfs3xdr.c --- kernel-2.6.32.54/linux-2.6.32/fs/nfsd/nfs3xdr.c 2012-01-16 15:01:39.588726581 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/fs/nfsd/nfs3xdr.c 2012-01-16 14:51:21.865408883 +0100 @@ -21,6 +21,7 @@ #include #include #include +#include #include "auth.h" #define NFSDDBG_FACILITY NFSDDBG_XDR @@ -108,6 +109,8 @@ decode_sattr3(__be32 *p, struct iattr *iap) { u32 tmp; + uid_t uid = 0; + gid_t gid = 0; iap->ia_valid = 0; @@ -117,12 +120,15 @@ } if (*p++) { iap->ia_valid |= ATTR_UID; - iap->ia_uid = ntohl(*p++); + uid = ntohl(*p++); } if (*p++) { iap->ia_valid |= ATTR_GID; - iap->ia_gid = ntohl(*p++); + gid = ntohl(*p++); } + iap->ia_uid = INOTAG_UID(DX_TAG_NFSD, uid, gid); + iap->ia_gid = INOTAG_GID(DX_TAG_NFSD, uid, gid); + iap->ia_tag = INOTAG_TAG(DX_TAG_NFSD, uid, gid, 0); if (*p++) { u64 newsize; @@ -178,8 +184,12 @@ *p++ = htonl(nfs3_ftypes[(stat->mode & S_IFMT) >> 12]); *p++ = htonl((u32) stat->mode); *p++ = htonl((u32) stat->nlink); - *p++ = htonl((u32) nfsd_ruid(rqstp, stat->uid)); - *p++ = htonl((u32) nfsd_rgid(rqstp, stat->gid)); + *p++ = htonl((u32) nfsd_ruid(rqstp, + TAGINO_UID(0 /* FIXME: DX_TAG(dentry->d_inode) */, + stat->uid, stat->tag))); + *p++ = htonl((u32) nfsd_rgid(rqstp, + TAGINO_GID(0 /* FIXME: DX_TAG(dentry->d_inode) */, + stat->gid, stat->tag))); if (S_ISLNK(stat->mode) && stat->size > NFS3_MAXPATHLEN) { p = xdr_encode_hyper(p, (u64) NFS3_MAXPATHLEN); } else { diff -Nur kernel-2.6.32.54/linux-2.6.32/fs/nfsd/nfs4xdr.c kernel-2.6.32.54.vs/linux-2.6.32/fs/nfsd/nfs4xdr.c --- kernel-2.6.32.54/linux-2.6.32/fs/nfsd/nfs4xdr.c 2012-01-16 15:01:39.592726567 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/fs/nfsd/nfs4xdr.c 2012-01-16 14:51:21.865408883 +0100 @@ -57,6 +57,7 @@ #include #include #include +#include #define NFSDDBG_FACILITY NFSDDBG_XDR @@ -2047,14 +2048,18 @@ WRITE32(stat.nlink); } if (bmval1 & FATTR4_WORD1_OWNER) { - status = nfsd4_encode_user(rqstp, stat.uid, &p, &buflen); + status = 
nfsd4_encode_user(rqstp, + TAGINO_UID(DX_TAG(dentry->d_inode), + stat.uid, stat.tag), &p, &buflen); if (status == nfserr_resource) goto out_resource; if (status) goto out; } if (bmval1 & FATTR4_WORD1_OWNER_GROUP) { - status = nfsd4_encode_group(rqstp, stat.gid, &p, &buflen); + status = nfsd4_encode_group(rqstp, + TAGINO_GID(DX_TAG(dentry->d_inode), + stat.gid, stat.tag), &p, &buflen); if (status == nfserr_resource) goto out_resource; if (status) diff -Nur kernel-2.6.32.54/linux-2.6.32/fs/nfsd/nfsxdr.c kernel-2.6.32.54.vs/linux-2.6.32/fs/nfsd/nfsxdr.c --- kernel-2.6.32.54/linux-2.6.32/fs/nfsd/nfsxdr.c 2009-12-03 04:51:21.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/fs/nfsd/nfsxdr.c 2012-01-16 14:51:21.865408883 +0100 @@ -15,6 +15,7 @@ #include #include #include +#include #include "auth.h" #define NFSDDBG_FACILITY NFSDDBG_XDR @@ -98,6 +99,8 @@ decode_sattr(__be32 *p, struct iattr *iap) { u32 tmp, tmp1; + uid_t uid = 0; + gid_t gid = 0; iap->ia_valid = 0; @@ -111,12 +114,15 @@ } if ((tmp = ntohl(*p++)) != (u32)-1) { iap->ia_valid |= ATTR_UID; - iap->ia_uid = tmp; + uid = tmp; } if ((tmp = ntohl(*p++)) != (u32)-1) { iap->ia_valid |= ATTR_GID; - iap->ia_gid = tmp; + gid = tmp; } + iap->ia_uid = INOTAG_UID(DX_TAG_NFSD, uid, gid); + iap->ia_gid = INOTAG_GID(DX_TAG_NFSD, uid, gid); + iap->ia_tag = INOTAG_TAG(DX_TAG_NFSD, uid, gid, 0); if ((tmp = ntohl(*p++)) != (u32)-1) { iap->ia_valid |= ATTR_SIZE; iap->ia_size = tmp; @@ -161,8 +167,10 @@ *p++ = htonl(nfs_ftypes[type >> 12]); *p++ = htonl((u32) stat->mode); *p++ = htonl((u32) stat->nlink); - *p++ = htonl((u32) nfsd_ruid(rqstp, stat->uid)); - *p++ = htonl((u32) nfsd_rgid(rqstp, stat->gid)); + *p++ = htonl((u32) nfsd_ruid(rqstp, + TAGINO_UID(DX_TAG(dentry->d_inode), stat->uid, stat->tag))); + *p++ = htonl((u32) nfsd_rgid(rqstp, + TAGINO_GID(DX_TAG(dentry->d_inode), stat->gid, stat->tag))); if (S_ISLNK(type) && stat->size > NFS_MAXPATHLEN) { *p++ = htonl(NFS_MAXPATHLEN); diff -Nur kernel-2.6.32.54/linux-2.6.32/fs/ocfs2/dlm/dlmfs.c kernel-2.6.32.54.vs/linux-2.6.32/fs/ocfs2/dlm/dlmfs.c --- kernel-2.6.32.54/linux-2.6.32/fs/ocfs2/dlm/dlmfs.c 2012-01-16 15:01:39.600726538 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/fs/ocfs2/dlm/dlmfs.c 2012-01-16 14:51:21.865408883 +0100 @@ -43,6 +43,7 @@ #include #include #include +#include #include @@ -342,6 +343,7 @@ inode->i_mode = mode; inode->i_uid = current_fsuid(); inode->i_gid = current_fsgid(); + inode->i_tag = dx_current_fstag(sb); inode->i_mapping->backing_dev_info = &dlmfs_backing_dev_info; inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME; inc_nlink(inode); @@ -367,6 +369,7 @@ inode->i_mode = mode; inode->i_uid = current_fsuid(); inode->i_gid = current_fsgid(); + inode->i_tag = dx_current_fstag(sb); inode->i_mapping->backing_dev_info = &dlmfs_backing_dev_info; inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME; diff -Nur kernel-2.6.32.54/linux-2.6.32/fs/ocfs2/dlmglue.c kernel-2.6.32.54.vs/linux-2.6.32/fs/ocfs2/dlmglue.c --- kernel-2.6.32.54/linux-2.6.32/fs/ocfs2/dlmglue.c 2009-12-03 04:51:21.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/fs/ocfs2/dlmglue.c 2012-01-16 14:51:21.865408883 +0100 @@ -1991,6 +1991,7 @@ lvb->lvb_iclusters = cpu_to_be32(oi->ip_clusters); lvb->lvb_iuid = cpu_to_be32(inode->i_uid); lvb->lvb_igid = cpu_to_be32(inode->i_gid); + lvb->lvb_itag = cpu_to_be16(inode->i_tag); lvb->lvb_imode = cpu_to_be16(inode->i_mode); lvb->lvb_inlink = cpu_to_be16(inode->i_nlink); lvb->lvb_iatime_packed = @@ -2045,6 +2046,7 @@ inode->i_uid = 
be32_to_cpu(lvb->lvb_iuid); inode->i_gid = be32_to_cpu(lvb->lvb_igid); + inode->i_tag = be16_to_cpu(lvb->lvb_itag); inode->i_mode = be16_to_cpu(lvb->lvb_imode); inode->i_nlink = be16_to_cpu(lvb->lvb_inlink); ocfs2_unpack_timespec(&inode->i_atime, diff -Nur kernel-2.6.32.54/linux-2.6.32/fs/ocfs2/dlmglue.h kernel-2.6.32.54.vs/linux-2.6.32/fs/ocfs2/dlmglue.h --- kernel-2.6.32.54/linux-2.6.32/fs/ocfs2/dlmglue.h 2009-12-03 04:51:21.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/fs/ocfs2/dlmglue.h 2012-01-16 14:51:21.865408883 +0100 @@ -46,7 +46,8 @@ __be16 lvb_inlink; __be32 lvb_iattr; __be32 lvb_igeneration; - __be32 lvb_reserved2; + __be16 lvb_itag; + __be16 lvb_reserved2; }; #define OCFS2_QINFO_LVB_VERSION 1 diff -Nur kernel-2.6.32.54/linux-2.6.32/fs/ocfs2/file.c kernel-2.6.32.54.vs/linux-2.6.32/fs/ocfs2/file.c --- kernel-2.6.32.54/linux-2.6.32/fs/ocfs2/file.c 2009-12-03 04:51:21.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/fs/ocfs2/file.c 2012-01-16 14:51:21.869408869 +0100 @@ -960,13 +960,15 @@ mlog(0, "uid change: %d\n", attr->ia_uid); if (attr->ia_valid & ATTR_GID) mlog(0, "gid change: %d\n", attr->ia_gid); + if (attr->ia_valid & ATTR_TAG) + mlog(0, "tag change: %d\n", attr->ia_tag); if (attr->ia_valid & ATTR_SIZE) mlog(0, "size change...\n"); if (attr->ia_valid & (ATTR_ATIME | ATTR_MTIME | ATTR_CTIME)) mlog(0, "time change...\n"); #define OCFS2_VALID_ATTRS (ATTR_ATIME | ATTR_MTIME | ATTR_CTIME | ATTR_SIZE \ - | ATTR_GID | ATTR_UID | ATTR_MODE) + | ATTR_GID | ATTR_UID | ATTR_TAG | ATTR_MODE) if (!(attr->ia_valid & OCFS2_VALID_ATTRS)) { mlog(0, "can't handle attrs: 0x%x\n", attr->ia_valid); return 0; diff -Nur kernel-2.6.32.54/linux-2.6.32/fs/ocfs2/inode.c kernel-2.6.32.54.vs/linux-2.6.32/fs/ocfs2/inode.c --- kernel-2.6.32.54/linux-2.6.32/fs/ocfs2/inode.c 2012-01-16 15:01:39.608726510 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/fs/ocfs2/inode.c 2012-01-16 14:51:21.869408869 +0100 @@ -29,6 +29,7 @@ #include #include #include +#include #include @@ -79,11 +80,13 @@ { unsigned int flags = OCFS2_I(inode)->ip_attr; - inode->i_flags &= ~(S_IMMUTABLE | + inode->i_flags &= ~(S_IMMUTABLE | S_IXUNLINK | S_SYNC | S_APPEND | S_NOATIME | S_DIRSYNC); if (flags & OCFS2_IMMUTABLE_FL) inode->i_flags |= S_IMMUTABLE; + if (flags & OCFS2_IXUNLINK_FL) + inode->i_flags |= S_IXUNLINK; if (flags & OCFS2_SYNC_FL) inode->i_flags |= S_SYNC; @@ -93,25 +96,44 @@ inode->i_flags |= S_NOATIME; if (flags & OCFS2_DIRSYNC_FL) inode->i_flags |= S_DIRSYNC; + + inode->i_vflags &= ~(V_BARRIER | V_COW); + + if (flags & OCFS2_BARRIER_FL) + inode->i_vflags |= V_BARRIER; + if (flags & OCFS2_COW_FL) + inode->i_vflags |= V_COW; } /* Propagate flags from i_flags to OCFS2_I(inode)->ip_attr */ void ocfs2_get_inode_flags(struct ocfs2_inode_info *oi) { unsigned int flags = oi->vfs_inode.i_flags; + unsigned int vflags = oi->vfs_inode.i_vflags; + + oi->ip_attr &= ~(OCFS2_SYNC_FL | OCFS2_APPEND_FL | + OCFS2_IMMUTABLE_FL | OCFS2_IXUNLINK_FL | + OCFS2_NOATIME_FL | OCFS2_DIRSYNC_FL | + OCFS2_BARRIER_FL | OCFS2_COW_FL); + + if (flags & S_IMMUTABLE) + oi->ip_attr |= OCFS2_IMMUTABLE_FL; + if (flags & S_IXUNLINK) + oi->ip_attr |= OCFS2_IXUNLINK_FL; - oi->ip_attr &= ~(OCFS2_SYNC_FL|OCFS2_APPEND_FL| - OCFS2_IMMUTABLE_FL|OCFS2_NOATIME_FL|OCFS2_DIRSYNC_FL); if (flags & S_SYNC) oi->ip_attr |= OCFS2_SYNC_FL; if (flags & S_APPEND) oi->ip_attr |= OCFS2_APPEND_FL; - if (flags & S_IMMUTABLE) - oi->ip_attr |= OCFS2_IMMUTABLE_FL; if (flags & S_NOATIME) oi->ip_attr |= OCFS2_NOATIME_FL; if (flags & S_DIRSYNC) oi->ip_attr |= OCFS2_DIRSYNC_FL; + + if 
(vflags & V_BARRIER) + oi->ip_attr |= OCFS2_BARRIER_FL; + if (vflags & V_COW) + oi->ip_attr |= OCFS2_COW_FL; } struct inode *ocfs2_ilookup(struct super_block *sb, u64 blkno) @@ -246,6 +268,8 @@ struct super_block *sb; struct ocfs2_super *osb; int use_plocks = 1; + uid_t uid; + gid_t gid; mlog_entry("(0x%p, size:%llu)\n", inode, (unsigned long long)le64_to_cpu(fe->i_size)); @@ -277,8 +301,12 @@ inode->i_generation = le32_to_cpu(fe->i_generation); inode->i_rdev = huge_decode_dev(le64_to_cpu(fe->id1.dev1.i_rdev)); inode->i_mode = le16_to_cpu(fe->i_mode); - inode->i_uid = le32_to_cpu(fe->i_uid); - inode->i_gid = le32_to_cpu(fe->i_gid); + uid = le32_to_cpu(fe->i_uid); + gid = le32_to_cpu(fe->i_gid); + inode->i_uid = INOTAG_UID(DX_TAG(inode), uid, gid); + inode->i_gid = INOTAG_GID(DX_TAG(inode), uid, gid); + inode->i_tag = INOTAG_TAG(DX_TAG(inode), uid, gid, + /* le16_to_cpu(raw_inode->i_raw_tag)i */ 0); /* Fast symlinks will have i_size but no allocated clusters. */ if (S_ISLNK(inode->i_mode) && !fe->i_clusters) diff -Nur kernel-2.6.32.54/linux-2.6.32/fs/ocfs2/inode.h kernel-2.6.32.54.vs/linux-2.6.32/fs/ocfs2/inode.h --- kernel-2.6.32.54/linux-2.6.32/fs/ocfs2/inode.h 2009-12-03 04:51:21.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/fs/ocfs2/inode.h 2012-01-16 14:51:21.873408855 +0100 @@ -150,6 +150,7 @@ void ocfs2_set_inode_flags(struct inode *inode); void ocfs2_get_inode_flags(struct ocfs2_inode_info *oi); +int ocfs2_sync_flags(struct inode *inode, int, int); static inline blkcnt_t ocfs2_inode_sector_count(struct inode *inode) { diff -Nur kernel-2.6.32.54/linux-2.6.32/fs/ocfs2/ioctl.c kernel-2.6.32.54.vs/linux-2.6.32/fs/ocfs2/ioctl.c --- kernel-2.6.32.54/linux-2.6.32/fs/ocfs2/ioctl.c 2009-12-03 04:51:21.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/fs/ocfs2/ioctl.c 2012-01-16 14:51:21.873408855 +0100 @@ -42,7 +42,41 @@ return status; } -static int ocfs2_set_inode_attr(struct inode *inode, unsigned flags, +int ocfs2_sync_flags(struct inode *inode, int flags, int vflags) +{ + struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); + struct buffer_head *bh = NULL; + handle_t *handle = NULL; + int status; + + status = ocfs2_inode_lock(inode, &bh, 1); + if (status < 0) { + mlog_errno(status); + return status; + } + handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS); + if (IS_ERR(handle)) { + status = PTR_ERR(handle); + mlog_errno(status); + goto bail_unlock; + } + + inode->i_flags = flags; + inode->i_vflags = vflags; + ocfs2_get_inode_flags(OCFS2_I(inode)); + + status = ocfs2_mark_inode_dirty(handle, inode, bh); + if (status < 0) + mlog_errno(status); + + ocfs2_commit_trans(osb, handle); +bail_unlock: + ocfs2_inode_unlock(inode, 1); + brelse(bh); + return status; +} + +int ocfs2_set_inode_attr(struct inode *inode, unsigned flags, unsigned mask) { struct ocfs2_inode_info *ocfs2_inode = OCFS2_I(inode); @@ -67,6 +101,11 @@ if (!S_ISDIR(inode->i_mode)) flags &= ~OCFS2_DIRSYNC_FL; + if (IS_BARRIER(inode)) { + vxwprintk_task(1, "messing with the barrier."); + goto bail_unlock; + } + handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS); if (IS_ERR(handle)) { status = PTR_ERR(handle); @@ -108,6 +147,7 @@ return status; } + long ocfs2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { struct inode *inode = filp->f_path.dentry->d_inode; diff -Nur kernel-2.6.32.54/linux-2.6.32/fs/ocfs2/namei.c kernel-2.6.32.54.vs/linux-2.6.32/fs/ocfs2/namei.c --- kernel-2.6.32.54/linux-2.6.32/fs/ocfs2/namei.c 2009-12-03 04:51:21.000000000 +0100 +++ 
kernel-2.6.32.54.vs/linux-2.6.32/fs/ocfs2/namei.c 2012-01-16 14:51:21.873408855 +0100 @@ -41,6 +41,7 @@ #include #include #include +#include #define MLOG_MASK_PREFIX ML_NAMEI #include @@ -481,6 +482,7 @@ u64 fe_blkno = 0; u16 suballoc_bit; u16 feat; + tag_t tag; *new_fe_bh = NULL; @@ -524,8 +526,11 @@ fe->i_blkno = cpu_to_le64(fe_blkno); fe->i_suballoc_bit = cpu_to_le16(suballoc_bit); fe->i_suballoc_slot = cpu_to_le16(inode_ac->ac_alloc_slot); - fe->i_uid = cpu_to_le32(inode->i_uid); - fe->i_gid = cpu_to_le32(inode->i_gid); + + tag = dx_current_fstag(osb->sb); + fe->i_uid = cpu_to_le32(TAGINO_UID(DX_TAG(inode), inode->i_uid, tag)); + fe->i_gid = cpu_to_le32(TAGINO_GID(DX_TAG(inode), inode->i_gid, tag)); + inode->i_tag = tag; fe->i_mode = cpu_to_le16(inode->i_mode); if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) fe->id1.dev1.i_rdev = cpu_to_le64(huge_encode_dev(dev)); diff -Nur kernel-2.6.32.54/linux-2.6.32/fs/ocfs2/ocfs2_fs.h kernel-2.6.32.54.vs/linux-2.6.32/fs/ocfs2/ocfs2_fs.h --- kernel-2.6.32.54/linux-2.6.32/fs/ocfs2/ocfs2_fs.h 2009-12-03 04:51:21.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/fs/ocfs2/ocfs2_fs.h 2012-01-16 14:51:21.873408855 +0100 @@ -231,18 +231,23 @@ #define OCFS2_HAS_REFCOUNT_FL (0x0010) /* Inode attributes, keep in sync with EXT2 */ -#define OCFS2_SECRM_FL (0x00000001) /* Secure deletion */ -#define OCFS2_UNRM_FL (0x00000002) /* Undelete */ -#define OCFS2_COMPR_FL (0x00000004) /* Compress file */ -#define OCFS2_SYNC_FL (0x00000008) /* Synchronous updates */ -#define OCFS2_IMMUTABLE_FL (0x00000010) /* Immutable file */ -#define OCFS2_APPEND_FL (0x00000020) /* writes to file may only append */ -#define OCFS2_NODUMP_FL (0x00000040) /* do not dump file */ -#define OCFS2_NOATIME_FL (0x00000080) /* do not update atime */ -#define OCFS2_DIRSYNC_FL (0x00010000) /* dirsync behaviour (directories only) */ +#define OCFS2_SECRM_FL FS_SECRM_FL /* Secure deletion */ +#define OCFS2_UNRM_FL FS_UNRM_FL /* Undelete */ +#define OCFS2_COMPR_FL FS_COMPR_FL /* Compress file */ +#define OCFS2_SYNC_FL FS_SYNC_FL /* Synchronous updates */ +#define OCFS2_IMMUTABLE_FL FS_IMMUTABLE_FL /* Immutable file */ +#define OCFS2_APPEND_FL FS_APPEND_FL /* writes to file may only append */ +#define OCFS2_NODUMP_FL FS_NODUMP_FL /* do not dump file */ +#define OCFS2_NOATIME_FL FS_NOATIME_FL /* do not update atime */ -#define OCFS2_FL_VISIBLE (0x000100FF) /* User visible flags */ -#define OCFS2_FL_MODIFIABLE (0x000100FF) /* User modifiable flags */ +#define OCFS2_DIRSYNC_FL FS_DIRSYNC_FL /* dirsync behaviour (directories only) */ +#define OCFS2_IXUNLINK_FL FS_IXUNLINK_FL /* Immutable invert on unlink */ + +#define OCFS2_BARRIER_FL FS_BARRIER_FL /* Barrier for chroot() */ +#define OCFS2_COW_FL FS_COW_FL /* Copy on Write marker */ + +#define OCFS2_FL_VISIBLE (0x010300FF) /* User visible flags */ +#define OCFS2_FL_MODIFIABLE (0x010300FF) /* User modifiable flags */ /* * Extent record flags (e_node.leaf.flags) diff -Nur kernel-2.6.32.54/linux-2.6.32/fs/ocfs2/ocfs2.h kernel-2.6.32.54.vs/linux-2.6.32/fs/ocfs2/ocfs2.h --- kernel-2.6.32.54/linux-2.6.32/fs/ocfs2/ocfs2.h 2009-12-03 04:51:21.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/fs/ocfs2/ocfs2.h 2012-01-16 14:51:21.873408855 +0100 @@ -248,6 +248,7 @@ OCFS2_MOUNT_POSIX_ACL = 1 << 8, /* POSIX access control lists */ OCFS2_MOUNT_USRQUOTA = 1 << 9, /* We support user quotas */ OCFS2_MOUNT_GRPQUOTA = 1 << 10, /* We support group quotas */ + OCFS2_MOUNT_TAGGED = 1 << 11, /* use tagging */ }; #define OCFS2_OSB_SOFT_RO 0x0001 diff -Nur 
kernel-2.6.32.54/linux-2.6.32/fs/ocfs2/super.c kernel-2.6.32.54.vs/linux-2.6.32/fs/ocfs2/super.c --- kernel-2.6.32.54/linux-2.6.32/fs/ocfs2/super.c 2012-01-16 15:01:39.616726482 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/fs/ocfs2/super.c 2012-01-16 14:51:21.873408855 +0100 @@ -173,6 +173,7 @@ Opt_noacl, Opt_usrquota, Opt_grpquota, + Opt_tag, Opt_notag, Opt_tagid, Opt_err, }; @@ -199,6 +200,9 @@ {Opt_noacl, "noacl"}, {Opt_usrquota, "usrquota"}, {Opt_grpquota, "grpquota"}, + {Opt_tag, "tag"}, + {Opt_notag, "notag"}, + {Opt_tagid, "tagid=%u"}, {Opt_err, NULL} }; @@ -605,6 +609,13 @@ goto out; } + if ((osb->s_mount_opt & OCFS2_MOUNT_TAGGED) != + (parsed_options.mount_opt & OCFS2_MOUNT_TAGGED)) { + ret = -EINVAL; + mlog(ML_ERROR, "Cannot change tagging on remount\n"); + goto out; + } + if ((osb->s_mount_opt & OCFS2_MOUNT_HB_LOCAL) != (parsed_options.mount_opt & OCFS2_MOUNT_HB_LOCAL)) { ret = -EINVAL; @@ -1152,6 +1163,9 @@ ocfs2_complete_mount_recovery(osb); + if (osb->s_mount_opt & OCFS2_MOUNT_TAGGED) + sb->s_flags |= MS_TAGGED; + if (ocfs2_mount_local(osb)) snprintf(nodestr, sizeof(nodestr), "local"); else @@ -1430,6 +1444,20 @@ printk(KERN_INFO "ocfs2 (no)acl options not supported\n"); break; #endif +#ifndef CONFIG_TAGGING_NONE + case Opt_tag: + mopt->mount_opt |= OCFS2_MOUNT_TAGGED; + break; + case Opt_notag: + mopt->mount_opt &= ~OCFS2_MOUNT_TAGGED; + break; +#endif +#ifdef CONFIG_PROPAGATE + case Opt_tagid: + /* use args[0] */ + mopt->mount_opt |= OCFS2_MOUNT_TAGGED; + break; +#endif default: mlog(ML_ERROR, "Unrecognized mount option \"%s\" " diff -Nur kernel-2.6.32.54/linux-2.6.32/fs/open.c kernel-2.6.32.54.vs/linux-2.6.32/fs/open.c --- kernel-2.6.32.54/linux-2.6.32/fs/open.c 2009-12-03 04:51:21.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/fs/open.c 2012-01-16 14:51:21.873408855 +0100 @@ -30,22 +30,30 @@ #include #include #include +#include +#include +#include +#include int vfs_statfs(struct dentry *dentry, struct kstatfs *buf) { int retval = -ENODEV; if (dentry) { + struct super_block *sb = dentry->d_sb; + retval = -ENOSYS; - if (dentry->d_sb->s_op->statfs) { + if (sb->s_op->statfs) { memset(buf, 0, sizeof(*buf)); retval = security_sb_statfs(dentry); if (retval) return retval; - retval = dentry->d_sb->s_op->statfs(dentry, buf); + retval = sb->s_op->statfs(dentry, buf); if (retval == 0 && buf->f_frsize == 0) buf->f_frsize = buf->f_bsize; } + if (!vx_check(0, VS_ADMIN|VS_WATCH)) + vx_vsi_statfs(sb, buf); } return retval; } @@ -640,6 +648,10 @@ error = user_path_at(dfd, filename, LOOKUP_FOLLOW, &path); if (error) goto out; + + error = cow_check_and_break(&path); + if (error) + goto dput_and_out; inode = path.dentry->d_inode; error = mnt_want_write(path.mnt); @@ -673,11 +685,11 @@ newattrs.ia_valid = ATTR_CTIME; if (user != (uid_t) -1) { newattrs.ia_valid |= ATTR_UID; - newattrs.ia_uid = user; + newattrs.ia_uid = dx_map_uid(user); } if (group != (gid_t) -1) { newattrs.ia_valid |= ATTR_GID; - newattrs.ia_gid = group; + newattrs.ia_gid = dx_map_gid(group); } if (!S_ISDIR(inode->i_mode)) newattrs.ia_valid |= @@ -700,7 +712,11 @@ error = mnt_want_write(path.mnt); if (error) goto out_release; - error = chown_common(path.dentry, user, group); +#ifdef CONFIG_VSERVER_COWBL + error = cow_check_and_break(&path); + if (!error) +#endif + error = chown_common(path.dentry, user, group); mnt_drop_write(path.mnt); out_release: path_put(&path); @@ -725,7 +741,11 @@ error = mnt_want_write(path.mnt); if (error) goto out_release; - error = chown_common(path.dentry, user, group); +#ifdef 
CONFIG_VSERVER_COWBL + error = cow_check_and_break(&path); + if (!error) +#endif + error = chown_common(path.dentry, user, group); mnt_drop_write(path.mnt); out_release: path_put(&path); @@ -744,7 +764,11 @@ error = mnt_want_write(path.mnt); if (error) goto out_release; - error = chown_common(path.dentry, user, group); +#ifdef CONFIG_VSERVER_COWBL + error = cow_check_and_break(&path); + if (!error) +#endif + error = chown_common(path.dentry, user, group); mnt_drop_write(path.mnt); out_release: path_put(&path); @@ -990,6 +1014,7 @@ __FD_CLR(fd, fdt->open_fds); if (fd < files->next_fd) files->next_fd = fd; + vx_openfd_dec(fd); } void put_unused_fd(unsigned int fd) diff -Nur kernel-2.6.32.54/linux-2.6.32/fs/proc/array.c kernel-2.6.32.54.vs/linux-2.6.32/fs/proc/array.c --- kernel-2.6.32.54/linux-2.6.32/fs/proc/array.c 2012-01-16 15:01:39.636726411 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/fs/proc/array.c 2012-01-16 14:51:21.881408827 +0100 @@ -82,6 +82,8 @@ #include #include #include +#include +#include #include #include @@ -166,6 +168,9 @@ rcu_read_lock(); ppid = pid_alive(p) ? task_tgid_nr_ns(rcu_dereference(p->real_parent), ns) : 0; + if (unlikely(vx_current_initpid(p->pid))) + ppid = 0; + tpid = 0; if (pid_alive(p)) { struct task_struct *tracer = tracehook_tracer_task(p); @@ -281,7 +286,7 @@ } static void render_cap_t(struct seq_file *m, const char *header, - kernel_cap_t *a) + struct vx_info *vxi, kernel_cap_t *a) { unsigned __capi; @@ -306,10 +311,11 @@ cap_bset = cred->cap_bset; rcu_read_unlock(); - render_cap_t(m, "CapInh:\t", &cap_inheritable); - render_cap_t(m, "CapPrm:\t", &cap_permitted); - render_cap_t(m, "CapEff:\t", &cap_effective); - render_cap_t(m, "CapBnd:\t", &cap_bset); + /* FIXME: maybe move the p->vx_info masking to __task_cred() ? */ + render_cap_t(m, "CapInh:\t", p->vx_info, &cap_inheritable); + render_cap_t(m, "CapPrm:\t", p->vx_info, &cap_permitted); + render_cap_t(m, "CapEff:\t", p->vx_info, &cap_effective); + render_cap_t(m, "CapBnd:\t", p->vx_info, &cap_bset); } static inline void task_context_switch_counts(struct seq_file *m, @@ -321,6 +327,43 @@ p->nivcsw); } + +int proc_pid_nsproxy(struct seq_file *m, struct pid_namespace *ns, + struct pid *pid, struct task_struct *task) +{ + seq_printf(m, "Proxy:\t%p(%c)\n" + "Count:\t%u\n" + "uts:\t%p(%c)\n" + "ipc:\t%p(%c)\n" + "mnt:\t%p(%c)\n" + "pid:\t%p(%c)\n" + "net:\t%p(%c)\n", + task->nsproxy, + (task->nsproxy == init_task.nsproxy ? 'I' : '-'), + atomic_read(&task->nsproxy->count), + task->nsproxy->uts_ns, + (task->nsproxy->uts_ns == init_task.nsproxy->uts_ns ? 'I' : '-'), + task->nsproxy->ipc_ns, + (task->nsproxy->ipc_ns == init_task.nsproxy->ipc_ns ? 'I' : '-'), + task->nsproxy->mnt_ns, + (task->nsproxy->mnt_ns == init_task.nsproxy->mnt_ns ? 'I' : '-'), + task->nsproxy->pid_ns, + (task->nsproxy->pid_ns == init_task.nsproxy->pid_ns ? 'I' : '-'), + task->nsproxy->net_ns, + (task->nsproxy->net_ns == init_task.nsproxy->net_ns ? 
'I' : '-')); + return 0; +} + +void task_vs_id(struct seq_file *m, struct task_struct *task) +{ + if (task_vx_flags(task, VXF_HIDE_VINFO, 0)) + return; + + seq_printf(m, "VxID: %d\n", vx_task_xid(task)); + seq_printf(m, "NxID: %d\n", nx_task_nid(task)); +} + + int proc_pid_status(struct seq_file *m, struct pid_namespace *ns, struct pid *pid, struct task_struct *task) { @@ -336,6 +379,7 @@ task_sig(m, task); task_cap(m, task); cpuset_task_status_allowed(m, task); + task_vs_id(m, task); task_context_switch_counts(m, task); return 0; } @@ -446,6 +490,17 @@ /* convert nsec -> ticks */ start_time = nsec_to_clock_t(start_time); + /* fixup start time for virt uptime */ + if (vx_flags(VXF_VIRT_UPTIME, 0)) { + unsigned long long bias = + current->vx_info->cvirt.bias_clock; + + if (start_time > bias) + start_time -= bias; + else + start_time = 0; + } + seq_printf(m, "%d (%s) %c %d %d %d %d %d %u %lu \ %lu %lu %lu %lu %lu %ld %ld %ld %ld %d 0 %llu %lu %ld %lu %lu %lu %lu %lu \ %lu %lu %lu %lu %lu %lu %lu %lu %d %d %u %u %llu %lu %ld\n", diff -Nur kernel-2.6.32.54/linux-2.6.32/fs/proc/base.c kernel-2.6.32.54.vs/linux-2.6.32/fs/proc/base.c --- kernel-2.6.32.54/linux-2.6.32/fs/proc/base.c 2012-01-16 15:01:39.636726411 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/fs/proc/base.c 2012-01-16 14:51:21.881408827 +0100 @@ -81,6 +81,8 @@ #include #include #include +#include +#include #include "internal.h" /* NOTE: @@ -1075,12 +1077,17 @@ return -ESRCH; } - if (oom_adjust < task->signal->oom_adj && !capable(CAP_SYS_RESOURCE)) { + if (oom_adjust < task->signal->oom_adj && + !vx_capable(CAP_SYS_RESOURCE, VXC_OOM_ADJUST)) { unlock_task_sighand(task, &flags); put_task_struct(task); return -EACCES; } + /* prevent guest processes from circumventing the oom killer */ + if (vx_current_xid() && (oom_adjust == OOM_DISABLE)) + oom_adjust = OOM_ADJUST_MIN; + task->signal->oom_adj = oom_adjust; unlock_task_sighand(task, &flags); @@ -1120,7 +1127,7 @@ ssize_t length; uid_t loginuid; - if (!capable(CAP_AUDIT_CONTROL)) + if (!vx_capable(CAP_AUDIT_CONTROL, VXC_AUDIT_CONTROL)) return -EPERM; if (current != pid_task(proc_pid(inode), PIDTYPE_PID)) @@ -1486,6 +1493,8 @@ inode->i_gid = cred->egid; rcu_read_unlock(); } + /* procfs is xid tagged */ + inode->i_tag = (tag_t)vx_task_xid(task); security_task_to_inode(task, inode); out: @@ -2036,6 +2045,13 @@ if (!task) goto out_no_task; + /* TODO: maybe we can come up with a generic approach? */ + if (task_vx_flags(task, VXF_HIDE_VINFO, 0) && + (dentry->d_name.len == 5) && + (!memcmp(dentry->d_name.name, "vinfo", 5) || + !memcmp(dentry->d_name.name, "ninfo", 5))) + goto out; + /* * Yes, it does not scale. And it should not. Don't add * new entries into /proc// without very good reasons. 
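The /proc/<pid>/stat hunk above adjusts the reported process start time when the guest runs with VXF_VIRT_UPTIME: the guest's bias_clock (roughly, the host clock value at which the guest "booted") is subtracted, and the result is clamped at zero so that tasks predating the guest do not underflow. The same clamp idiom, modeled in plain C:

#include <stdint.h>

/* host start time -> guest-relative start time (sketch) */
static uint64_t virt_start_time(uint64_t host_start, uint64_t guest_bias)
{
	/* clamp rather than wrap: with unsigned arithmetic, a task
	 * started before the guest would otherwise report a start
	 * time of nearly 2^64 ticks in the guest's /proc */
	return host_start > guest_bias ? host_start - guest_bias : 0;
}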
@@ -2441,7 +2457,7 @@ static struct dentry *proc_base_lookup(struct inode *dir, struct dentry *dentry) { struct dentry *error; - struct task_struct *task = get_proc_task(dir); + struct task_struct *task = get_proc_task_real(dir); const struct pid_entry *p, *last; error = ERR_PTR(-ENOENT); @@ -2538,6 +2554,9 @@ static const struct file_operations proc_task_operations; static const struct inode_operations proc_task_inode_operations; +extern int proc_pid_vx_info(struct task_struct *, char *); +extern int proc_pid_nx_info(struct task_struct *, char *); + static const struct pid_entry tgid_base_stuff[] = { DIR("task", S_IRUGO|S_IXUGO, proc_task_inode_operations, proc_task_operations), DIR("fd", S_IRUSR|S_IXUSR, proc_fd_inode_operations, proc_fd_operations), @@ -2596,6 +2615,8 @@ #ifdef CONFIG_CGROUPS REG("cgroup", S_IRUGO, proc_cgroup_operations), #endif + INF("vinfo", S_IRUGO, proc_pid_vx_info), + INF("ninfo", S_IRUGO, proc_pid_nx_info), INF("oom_score", S_IRUGO, proc_oom_score), REG("oom_adj", S_IRUGO|S_IWUSR, proc_oom_adjust_operations), #ifdef CONFIG_AUDITSYSCALL @@ -2611,6 +2632,7 @@ #ifdef CONFIG_TASK_IO_ACCOUNTING INF("io", S_IRUSR, proc_tgid_io_accounting), #endif + ONE("nsproxy", S_IRUGO, proc_pid_nsproxy), }; static int proc_tgid_base_readdir(struct file * filp, @@ -2802,7 +2824,7 @@ iter.task = NULL; pid = find_ge_pid(iter.tgid, ns); if (pid) { - iter.tgid = pid_nr_ns(pid, ns); + iter.tgid = pid_unmapped_nr_ns(pid, ns); iter.task = pid_task(pid, PIDTYPE_PID); /* What we to know is if the pid we have find is the * pid of a thread_group_leader. Testing for task @@ -2832,7 +2854,7 @@ struct tgid_iter iter) { char name[PROC_NUMBUF]; - int len = snprintf(name, sizeof(name), "%d", iter.tgid); + int len = snprintf(name, sizeof(name), "%d", vx_map_tgid(iter.tgid)); return proc_fill_cache(filp, dirent, filldir, name, len, proc_pid_instantiate, iter.task, NULL); } @@ -2849,7 +2871,7 @@ goto out_no_task; nr = filp->f_pos - FIRST_PROCESS_ENTRY; - reaper = get_proc_task(filp->f_path.dentry->d_inode); + reaper = get_proc_task_real(filp->f_path.dentry->d_inode); if (!reaper) goto out_no_task; @@ -2866,6 +2888,8 @@ iter.task; iter.tgid += 1, iter = next_tgid(ns, iter)) { filp->f_pos = iter.tgid + TGID_OFFSET; + if (!vx_proc_task_visible(iter.task)) + continue; if (proc_pid_fill_cache(filp, dirent, filldir, iter) < 0) { put_task_struct(iter.task); goto out; @@ -3012,6 +3036,8 @@ tid = name_to_int(dentry); if (tid == ~0U) goto out; + if (vx_current_initpid(tid)) + goto out; ns = dentry->d_sb->s_fs_info; rcu_read_lock(); diff -Nur kernel-2.6.32.54/linux-2.6.32/fs/proc/generic.c kernel-2.6.32.54.vs/linux-2.6.32/fs/proc/generic.c --- kernel-2.6.32.54/linux-2.6.32/fs/proc/generic.c 2009-12-03 04:51:21.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/fs/proc/generic.c 2012-01-16 14:51:21.885408813 +0100 @@ -20,6 +20,7 @@ #include #include #include +#include #include #include "internal.h" @@ -425,6 +426,8 @@ for (de = de->subdir; de ; de = de->next) { if (de->namelen != dentry->d_name.len) continue; + if (!vx_hide_check(0, de->vx_flags)) + continue; if (!memcmp(dentry->d_name.name, de->name, de->namelen)) { unsigned int ino; @@ -433,6 +436,8 @@ spin_unlock(&proc_subdir_lock); error = -EINVAL; inode = proc_get_inode(dir->i_sb, ino, de); + /* generic proc entries belong to the host */ + inode->i_tag = 0; goto out_unlock; } } @@ -510,6 +515,8 @@ /* filldir passes info to user space */ de_get(de); + if (!vx_hide_check(0, de->vx_flags)) + goto skip; spin_unlock(&proc_subdir_lock); if (filldir(dirent, 
de->name, de->namelen, filp->f_pos, de->low_ino, de->mode >> 12) < 0) { @@ -517,6 +524,7 @@ goto out; } spin_lock(&proc_subdir_lock); + skip: filp->f_pos++; next = de->next; de_put(de); @@ -631,6 +639,7 @@ ent->nlink = nlink; atomic_set(&ent->count, 1); ent->pde_users = 0; + ent->vx_flags = IATTR_PROC_DEFAULT; spin_lock_init(&ent->pde_unload_lock); ent->pde_unload_completion = NULL; INIT_LIST_HEAD(&ent->pde_openers); @@ -654,7 +663,8 @@ kfree(ent->data); kfree(ent); ent = NULL; - } + } else + ent->vx_flags = IATTR_PROC_SYMLINK; } else { kfree(ent); ent = NULL; diff -Nur kernel-2.6.32.54/linux-2.6.32/fs/proc/inode.c kernel-2.6.32.54.vs/linux-2.6.32/fs/proc/inode.c --- kernel-2.6.32.54/linux-2.6.32/fs/proc/inode.c 2009-12-03 04:51:21.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/fs/proc/inode.c 2012-01-16 14:51:21.885408813 +0100 @@ -459,6 +459,8 @@ inode->i_uid = de->uid; inode->i_gid = de->gid; } + if (de->vx_flags) + PROC_I(inode)->vx_flags = de->vx_flags; if (de->size) inode->i_size = de->size; if (de->nlink) diff -Nur kernel-2.6.32.54/linux-2.6.32/fs/proc/internal.h kernel-2.6.32.54.vs/linux-2.6.32/fs/proc/internal.h --- kernel-2.6.32.54/linux-2.6.32/fs/proc/internal.h 2009-12-03 04:51:21.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/fs/proc/internal.h 2012-01-16 14:51:21.885408813 +0100 @@ -10,6 +10,7 @@ */ #include +#include extern struct proc_dir_entry proc_root; #ifdef CONFIG_PROC_SYSCTL @@ -51,6 +52,9 @@ struct pid *pid, struct task_struct *task); extern int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns, struct pid *pid, struct task_struct *task); +extern int proc_pid_nsproxy(struct seq_file *m, struct pid_namespace *ns, + struct pid *pid, struct task_struct *task); + extern loff_t mem_lseek(struct file *file, loff_t offset, int orig); extern const struct file_operations proc_maps_operations; @@ -70,11 +74,16 @@ return PROC_I(inode)->pid; } -static inline struct task_struct *get_proc_task(struct inode *inode) +static inline struct task_struct *get_proc_task_real(struct inode *inode) { return get_pid_task(proc_pid(inode), PIDTYPE_PID); } +static inline struct task_struct *get_proc_task(struct inode *inode) +{ + return vx_get_proc_task(inode, proc_pid(inode)); +} + static inline int proc_fd(struct inode *inode) { return PROC_I(inode)->fd; diff -Nur kernel-2.6.32.54/linux-2.6.32/fs/proc/loadavg.c kernel-2.6.32.54.vs/linux-2.6.32/fs/proc/loadavg.c --- kernel-2.6.32.54/linux-2.6.32/fs/proc/loadavg.c 2009-12-03 04:51:21.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/fs/proc/loadavg.c 2012-01-16 14:51:21.885408813 +0100 @@ -12,15 +12,27 @@ static int loadavg_proc_show(struct seq_file *m, void *v) { + unsigned long running; + unsigned int threads; unsigned long avnrun[3]; get_avenrun(avnrun, FIXED_1/200, 0); + if (vx_flags(VXF_VIRT_LOAD, 0)) { + struct vx_info *vxi = current_vx_info(); + + running = atomic_read(&vxi->cvirt.nr_running); + threads = atomic_read(&vxi->cvirt.nr_threads); + } else { + running = nr_running(); + threads = nr_threads; + } + seq_printf(m, "%lu.%02lu %lu.%02lu %lu.%02lu %ld/%d %d\n", LOAD_INT(avnrun[0]), LOAD_FRAC(avnrun[0]), LOAD_INT(avnrun[1]), LOAD_FRAC(avnrun[1]), LOAD_INT(avnrun[2]), LOAD_FRAC(avnrun[2]), - nr_running(), nr_threads, + running, threads, task_active_pid_ns(current)->last_pid); return 0; } diff -Nur kernel-2.6.32.54/linux-2.6.32/fs/proc/meminfo.c kernel-2.6.32.54.vs/linux-2.6.32/fs/proc/meminfo.c --- kernel-2.6.32.54/linux-2.6.32/fs/proc/meminfo.c 2009-12-03 04:51:21.000000000 +0100 +++ 
kernel-2.6.32.54.vs/linux-2.6.32/fs/proc/meminfo.c 2012-01-16 14:51:21.885408813 +0100 @@ -39,7 +39,8 @@ allowed = ((totalram_pages - hugetlb_total_pages()) * sysctl_overcommit_ratio / 100) + total_swap_pages; - cached = global_page_state(NR_FILE_PAGES) - + cached = vx_flags(VXF_VIRT_MEM, 0) ? + vx_vsi_cached(&i) : global_page_state(NR_FILE_PAGES) - total_swapcache_pages - i.bufferram; if (cached < 0) cached = 0; diff -Nur kernel-2.6.32.54/linux-2.6.32/fs/proc/root.c kernel-2.6.32.54.vs/linux-2.6.32/fs/proc/root.c --- kernel-2.6.32.54/linux-2.6.32/fs/proc/root.c 2009-12-03 04:51:21.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/fs/proc/root.c 2012-01-16 14:51:21.885408813 +0100 @@ -18,9 +18,14 @@ #include #include #include +#include #include "internal.h" +struct proc_dir_entry *proc_virtual; + +extern void proc_vx_init(void); + static int proc_test_super(struct super_block *sb, void *data) { return sb->s_fs_info == data; @@ -136,6 +141,7 @@ #endif proc_mkdir("bus", NULL); proc_sys_init(); + proc_vx_init(); } static int proc_root_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat @@ -203,6 +209,7 @@ .proc_iops = &proc_root_inode_operations, .proc_fops = &proc_root_operations, .parent = &proc_root, + .vx_flags = IATTR_ADMIN | IATTR_WATCH, }; int pid_ns_prepare_proc(struct pid_namespace *ns) diff -Nur kernel-2.6.32.54/linux-2.6.32/fs/proc/uptime.c kernel-2.6.32.54.vs/linux-2.6.32/fs/proc/uptime.c --- kernel-2.6.32.54/linux-2.6.32/fs/proc/uptime.c 2009-12-03 04:51:21.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/fs/proc/uptime.c 2012-01-16 14:51:21.885408813 +0100 @@ -4,22 +4,22 @@ #include #include #include -#include +#include #include static int uptime_proc_show(struct seq_file *m, void *v) { struct timespec uptime; struct timespec idle; - int i; - cputime_t idletime = cputime_zero; - - for_each_possible_cpu(i) - idletime = cputime64_add(idletime, kstat_cpu(i).cpustat.idle); + cputime_t idletime = cputime_add(init_task.utime, init_task.stime); do_posix_clock_monotonic_gettime(&uptime); monotonic_to_bootbased(&uptime); cputime_to_timespec(idletime, &idle); + + if (vx_flags(VXF_VIRT_UPTIME, 0)) + vx_vsi_uptime(&uptime, &idle); + seq_printf(m, "%lu.%02lu %lu.%02lu\n", (unsigned long) uptime.tv_sec, (uptime.tv_nsec / (NSEC_PER_SEC / 100)), diff -Nur kernel-2.6.32.54/linux-2.6.32/fs/quota/quota.c kernel-2.6.32.54.vs/linux-2.6.32/fs/quota/quota.c --- kernel-2.6.32.54/linux-2.6.32/fs/quota/quota.c 2009-12-03 04:51:21.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/fs/quota/quota.c 2012-01-16 14:51:21.885408813 +0100 @@ -18,6 +18,7 @@ #include #include #include +#include /* Check validity of generic quotactl commands */ static int generic_quotactl_valid(struct super_block *sb, int type, int cmd, @@ -83,11 +84,11 @@ if (cmd == Q_GETQUOTA) { if (((type == USRQUOTA && current_euid() != id) || (type == GRPQUOTA && !in_egroup_p(id))) && - !capable(CAP_SYS_ADMIN)) + !vx_capable(CAP_SYS_ADMIN, VXC_QUOTA_CTL)) return -EPERM; } else if (cmd != Q_GETFMT && cmd != Q_SYNC && cmd != Q_GETINFO) - if (!capable(CAP_SYS_ADMIN)) + if (!vx_capable(CAP_SYS_ADMIN, VXC_QUOTA_CTL)) return -EPERM; return 0; @@ -135,10 +136,10 @@ if (cmd == Q_XGETQUOTA) { if (((type == XQM_USRQUOTA && current_euid() != id) || (type == XQM_GRPQUOTA && !in_egroup_p(id))) && - !capable(CAP_SYS_ADMIN)) + !vx_capable(CAP_SYS_ADMIN, VXC_QUOTA_CTL)) return -EPERM; } else if (cmd != Q_XGETQSTAT && cmd != Q_XQUOTASYNC) { - if (!capable(CAP_SYS_ADMIN)) + if (!vx_capable(CAP_SYS_ADMIN, VXC_QUOTA_CTL)) return 
-EPERM; } @@ -351,6 +352,46 @@ return 0; } +#if defined(CONFIG_BLK_DEV_VROOT) || defined(CONFIG_BLK_DEV_VROOT_MODULE) + +#include +#include +#include +#include +#include + +static vroot_grb_func *vroot_get_real_bdev = NULL; + +static spinlock_t vroot_grb_lock = SPIN_LOCK_UNLOCKED; + +int register_vroot_grb(vroot_grb_func *func) { + int ret = -EBUSY; + + spin_lock(&vroot_grb_lock); + if (!vroot_get_real_bdev) { + vroot_get_real_bdev = func; + ret = 0; + } + spin_unlock(&vroot_grb_lock); + return ret; +} +EXPORT_SYMBOL(register_vroot_grb); + +int unregister_vroot_grb(vroot_grb_func *func) { + int ret = -EINVAL; + + spin_lock(&vroot_grb_lock); + if (vroot_get_real_bdev) { + vroot_get_real_bdev = NULL; + ret = 0; + } + spin_unlock(&vroot_grb_lock); + return ret; +} +EXPORT_SYMBOL(unregister_vroot_grb); + +#endif + /* * look up a superblock on which quota ops will be performed * - use the name of a block device to find the superblock thereon @@ -368,6 +409,22 @@ putname(tmp); if (IS_ERR(bdev)) return ERR_CAST(bdev); +#if defined(CONFIG_BLK_DEV_VROOT) || defined(CONFIG_BLK_DEV_VROOT_MODULE) + if (bdev && bdev->bd_inode && + imajor(bdev->bd_inode) == VROOT_MAJOR) { + struct block_device *bdnew = (void *)-EINVAL; + + if (vroot_get_real_bdev) + bdnew = vroot_get_real_bdev(bdev); + else + vxdprintk(VXD_CBIT(misc, 0), + "vroot_get_real_bdev not set"); + bdput(bdev); + if (IS_ERR(bdnew)) + return ERR_PTR(PTR_ERR(bdnew)); + bdev = bdnew; + } +#endif sb = get_super(bdev); bdput(bdev); if (!sb) diff -Nur kernel-2.6.32.54/linux-2.6.32/fs/reiserfs/file.c kernel-2.6.32.54.vs/linux-2.6.32/fs/reiserfs/file.c --- kernel-2.6.32.54/linux-2.6.32/fs/reiserfs/file.c 2009-12-03 04:51:21.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/fs/reiserfs/file.c 2012-01-16 14:51:21.885408813 +0100 @@ -307,4 +307,5 @@ .listxattr = reiserfs_listxattr, .removexattr = reiserfs_removexattr, .permission = reiserfs_permission, + .sync_flags = reiserfs_sync_flags, }; diff -Nur kernel-2.6.32.54/linux-2.6.32/fs/reiserfs/inode.c kernel-2.6.32.54.vs/linux-2.6.32/fs/reiserfs/inode.c --- kernel-2.6.32.54/linux-2.6.32/fs/reiserfs/inode.c 2012-01-16 15:01:39.640726397 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/fs/reiserfs/inode.c 2012-01-16 14:51:21.893408785 +0100 @@ -18,6 +18,7 @@ #include #include #include +#include int reiserfs_commit_write(struct file *f, struct page *page, unsigned from, unsigned to); @@ -1117,6 +1118,8 @@ struct buffer_head *bh; struct item_head *ih; __u32 rdev; + uid_t uid; + gid_t gid; //int version = ITEM_VERSION_1; bh = PATH_PLAST_BUFFER(path); @@ -1138,12 +1141,13 @@ (struct stat_data_v1 *)B_I_PITEM(bh, ih); unsigned long blocks; + uid = sd_v1_uid(sd); + gid = sd_v1_gid(sd); + set_inode_item_key_version(inode, KEY_FORMAT_3_5); set_inode_sd_version(inode, STAT_DATA_V1); inode->i_mode = sd_v1_mode(sd); inode->i_nlink = sd_v1_nlink(sd); - inode->i_uid = sd_v1_uid(sd); - inode->i_gid = sd_v1_gid(sd); inode->i_size = sd_v1_size(sd); inode->i_atime.tv_sec = sd_v1_atime(sd); inode->i_mtime.tv_sec = sd_v1_mtime(sd); @@ -1185,11 +1189,12 @@ // (directories and symlinks) struct stat_data *sd = (struct stat_data *)B_I_PITEM(bh, ih); + uid = sd_v2_uid(sd); + gid = sd_v2_gid(sd); + inode->i_mode = sd_v2_mode(sd); inode->i_nlink = sd_v2_nlink(sd); - inode->i_uid = sd_v2_uid(sd); inode->i_size = sd_v2_size(sd); - inode->i_gid = sd_v2_gid(sd); inode->i_mtime.tv_sec = sd_v2_mtime(sd); inode->i_atime.tv_sec = sd_v2_atime(sd); inode->i_ctime.tv_sec = sd_v2_ctime(sd); @@ -1219,6 +1224,10 @@ sd_attrs_to_i_attrs(sd_v2_attrs(sd), 
inode); } + inode->i_uid = INOTAG_UID(DX_TAG(inode), uid, gid); + inode->i_gid = INOTAG_GID(DX_TAG(inode), uid, gid); + inode->i_tag = INOTAG_TAG(DX_TAG(inode), uid, gid, 0); + pathrelse(path); if (S_ISREG(inode->i_mode)) { inode->i_op = &reiserfs_file_inode_operations; @@ -1241,13 +1250,15 @@ static void inode2sd(void *sd, struct inode *inode, loff_t size) { struct stat_data *sd_v2 = (struct stat_data *)sd; + uid_t uid = TAGINO_UID(DX_TAG(inode), inode->i_uid, inode->i_tag); + gid_t gid = TAGINO_GID(DX_TAG(inode), inode->i_gid, inode->i_tag); __u16 flags; + set_sd_v2_uid(sd_v2, uid); + set_sd_v2_gid(sd_v2, gid); set_sd_v2_mode(sd_v2, inode->i_mode); set_sd_v2_nlink(sd_v2, inode->i_nlink); - set_sd_v2_uid(sd_v2, inode->i_uid); set_sd_v2_size(sd_v2, size); - set_sd_v2_gid(sd_v2, inode->i_gid); set_sd_v2_mtime(sd_v2, inode->i_mtime.tv_sec); set_sd_v2_atime(sd_v2, inode->i_atime.tv_sec); set_sd_v2_ctime(sd_v2, inode->i_ctime.tv_sec); @@ -2839,14 +2850,19 @@ void sd_attrs_to_i_attrs(__u16 sd_attrs, struct inode *inode) { if (reiserfs_attrs(inode->i_sb)) { - if (sd_attrs & REISERFS_SYNC_FL) - inode->i_flags |= S_SYNC; - else - inode->i_flags &= ~S_SYNC; if (sd_attrs & REISERFS_IMMUTABLE_FL) inode->i_flags |= S_IMMUTABLE; else inode->i_flags &= ~S_IMMUTABLE; + if (sd_attrs & REISERFS_IXUNLINK_FL) + inode->i_flags |= S_IXUNLINK; + else + inode->i_flags &= ~S_IXUNLINK; + + if (sd_attrs & REISERFS_SYNC_FL) + inode->i_flags |= S_SYNC; + else + inode->i_flags &= ~S_SYNC; if (sd_attrs & REISERFS_APPEND_FL) inode->i_flags |= S_APPEND; else @@ -2859,6 +2875,15 @@ REISERFS_I(inode)->i_flags |= i_nopack_mask; else REISERFS_I(inode)->i_flags &= ~i_nopack_mask; + + if (sd_attrs & REISERFS_BARRIER_FL) + inode->i_vflags |= V_BARRIER; + else + inode->i_vflags &= ~V_BARRIER; + if (sd_attrs & REISERFS_COW_FL) + inode->i_vflags |= V_COW; + else + inode->i_vflags &= ~V_COW; } } @@ -2869,6 +2894,11 @@ *sd_attrs |= REISERFS_IMMUTABLE_FL; else *sd_attrs &= ~REISERFS_IMMUTABLE_FL; + if (inode->i_flags & S_IXUNLINK) + *sd_attrs |= REISERFS_IXUNLINK_FL; + else + *sd_attrs &= ~REISERFS_IXUNLINK_FL; + if (inode->i_flags & S_SYNC) *sd_attrs |= REISERFS_SYNC_FL; else @@ -2881,6 +2911,15 @@ *sd_attrs |= REISERFS_NOTAIL_FL; else *sd_attrs &= ~REISERFS_NOTAIL_FL; + + if (inode->i_vflags & V_BARRIER) + *sd_attrs |= REISERFS_BARRIER_FL; + else + *sd_attrs &= ~REISERFS_BARRIER_FL; + if (inode->i_vflags & V_COW) + *sd_attrs |= REISERFS_COW_FL; + else + *sd_attrs &= ~REISERFS_COW_FL; } } @@ -3101,9 +3140,11 @@ } error = inode_change_ok(inode, attr); + if (!error) { if ((ia_valid & ATTR_UID && attr->ia_uid != inode->i_uid) || - (ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid)) { + (ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid) || + (ia_valid & ATTR_TAG && attr->ia_tag != inode->i_tag)) { error = reiserfs_chown_xattrs(inode, attr); if (!error) { @@ -3133,6 +3174,9 @@ inode->i_uid = attr->ia_uid; if (attr->ia_valid & ATTR_GID) inode->i_gid = attr->ia_gid; + if ((attr->ia_valid & ATTR_TAG) && + IS_TAGGED(inode)) + inode->i_tag = attr->ia_tag; mark_inode_dirty(inode); error = journal_end(&th, inode->i_sb, jbegin_count); diff -Nur kernel-2.6.32.54/linux-2.6.32/fs/reiserfs/ioctl.c kernel-2.6.32.54.vs/linux-2.6.32/fs/reiserfs/ioctl.c --- kernel-2.6.32.54/linux-2.6.32/fs/reiserfs/ioctl.c 2009-12-03 04:51:21.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/fs/reiserfs/ioctl.c 2012-01-16 14:51:21.893408785 +0100 @@ -7,11 +7,27 @@ #include #include #include +#include #include #include #include #include + +int 
reiserfs_sync_flags(struct inode *inode, int flags, int vflags) +{ + __u16 sd_attrs = 0; + + inode->i_flags = flags; + inode->i_vflags = vflags; + + i_attrs_to_sd_attrs(inode, &sd_attrs); + REISERFS_I(inode)->i_attrs = sd_attrs; + inode->i_ctime = CURRENT_TIME_SEC; + mark_inode_dirty(inode); + return 0; +} + /* ** reiserfs_ioctl - handler for ioctl for inode ** supported commands: @@ -23,7 +39,7 @@ int reiserfs_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { - unsigned int flags; + unsigned int flags, oldflags; int err = 0; switch (cmd) { @@ -43,6 +59,7 @@ flags = REISERFS_I(inode)->i_attrs; i_attrs_to_sd_attrs(inode, (__u16 *) & flags); + flags &= REISERFS_FL_USER_VISIBLE; return put_user(flags, (int __user *)arg); case REISERFS_IOC_SETFLAGS:{ if (!reiserfs_attrs(inode->i_sb)) @@ -60,6 +77,10 @@ err = -EFAULT; goto setflags_out; } + if (IS_BARRIER(inode)) { + vxwprintk_task(1, "messing with the barrier."); + return -EACCES; + } /* * Is it quota file? Do not allow user to mess with it */ @@ -84,6 +105,10 @@ goto setflags_out; } } + + oldflags = REISERFS_I(inode)->i_attrs; + flags &= REISERFS_FL_USER_MODIFIABLE; + flags |= oldflags & ~REISERFS_FL_USER_MODIFIABLE; sd_attrs_to_i_attrs(flags, inode); REISERFS_I(inode)->i_attrs = flags; inode->i_ctime = CURRENT_TIME_SEC; diff -Nur kernel-2.6.32.54/linux-2.6.32/fs/reiserfs/namei.c kernel-2.6.32.54.vs/linux-2.6.32/fs/reiserfs/namei.c --- kernel-2.6.32.54/linux-2.6.32/fs/reiserfs/namei.c 2009-12-03 04:51:21.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/fs/reiserfs/namei.c 2012-01-16 14:51:21.893408785 +0100 @@ -17,6 +17,7 @@ #include #include #include +#include #define INC_DIR_INODE_NLINK(i) if (i->i_nlink != 1) { inc_nlink(i); if (i->i_nlink >= REISERFS_LINK_MAX) i->i_nlink=1; } #define DEC_DIR_INODE_NLINK(i) if (i->i_nlink != 1) drop_nlink(i); @@ -354,6 +355,7 @@ if (retval == IO_ERROR) { return ERR_PTR(-EIO); } + dx_propagate_tag(nd, inode); return d_splice_alias(inode, dentry); } @@ -570,6 +572,7 @@ } else { inode->i_gid = current_fsgid(); } + inode->i_tag = dx_current_fstag(inode->i_sb); vfs_dq_init(inode); return 0; } @@ -1515,6 +1518,7 @@ .listxattr = reiserfs_listxattr, .removexattr = reiserfs_removexattr, .permission = reiserfs_permission, + .sync_flags = reiserfs_sync_flags, }; /* diff -Nur kernel-2.6.32.54/linux-2.6.32/fs/reiserfs/super.c kernel-2.6.32.54.vs/linux-2.6.32/fs/reiserfs/super.c --- kernel-2.6.32.54/linux-2.6.32/fs/reiserfs/super.c 2012-01-16 15:01:39.640726397 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/fs/reiserfs/super.c 2012-01-16 14:51:21.897408771 +0100 @@ -888,6 +888,14 @@ {"user_xattr",.setmask = 1 << REISERFS_UNSUPPORTED_OPT}, {"nouser_xattr",.clrmask = 1 << REISERFS_UNSUPPORTED_OPT}, #endif +#ifndef CONFIG_TAGGING_NONE + {"tagxid",.setmask = 1 << REISERFS_TAGGED}, + {"tag",.setmask = 1 << REISERFS_TAGGED}, + {"notag",.clrmask = 1 << REISERFS_TAGGED}, +#endif +#ifdef CONFIG_PROPAGATE + {"tag",.arg_required = 'T',.values = NULL}, +#endif #ifdef CONFIG_REISERFS_FS_POSIX_ACL {"acl",.setmask = 1 << REISERFS_POSIXACL}, {"noacl",.clrmask = 1 << REISERFS_POSIXACL}, @@ -1195,6 +1203,14 @@ handle_quota_files(s, qf_names, &qfmt); #endif + if ((mount_options & (1 << REISERFS_TAGGED)) && + !(s->s_flags & MS_TAGGED)) { + reiserfs_warning(s, "super-vs01", + "reiserfs: tagging not permitted on remount."); + err = -EINVAL; + goto out_err; + } + handle_attrs(s); /* Add options that are safe here */ @@ -1657,6 +1673,10 @@ goto error; } + /* map mount option tagxid */ + if 
(REISERFS_SB(s)->s_mount_opt & (1 << REISERFS_TAGGED)) + s->s_flags |= MS_TAGGED; + rs = SB_DISK_SUPER_BLOCK(s); /* Let's do basic sanity check to verify that underlying device is not smaller than the filesystem. If the check fails then abort and scream, diff -Nur kernel-2.6.32.54/linux-2.6.32/fs/reiserfs/super.c.orig kernel-2.6.32.54.vs/linux-2.6.32/fs/reiserfs/super.c.orig --- kernel-2.6.32.54/linux-2.6.32/fs/reiserfs/super.c.orig 1970-01-01 01:00:00.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/fs/reiserfs/super.c.orig 2012-01-16 14:47:19.170255916 +0100 @@ -0,0 +1,2241 @@ +/* + * Copyright 2000 by Hans Reiser, licensing governed by reiserfs/README + * + * Trivial changes by Alan Cox to add the LFS fixes + * + * Trivial Changes: + * Rights granted to Hans Reiser to redistribute under other terms providing + * he accepts all liability including but not limited to patent, fitness + * for purpose, and direct or indirect claims arising from failure to perform. + * + * NO WARRANTY + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +struct file_system_type reiserfs_fs_type; + +static const char reiserfs_3_5_magic_string[] = REISERFS_SUPER_MAGIC_STRING; +static const char reiserfs_3_6_magic_string[] = REISER2FS_SUPER_MAGIC_STRING; +static const char reiserfs_jr_magic_string[] = REISER2FS_JR_SUPER_MAGIC_STRING; + +int is_reiserfs_3_5(struct reiserfs_super_block *rs) +{ + return !strncmp(rs->s_v1.s_magic, reiserfs_3_5_magic_string, + strlen(reiserfs_3_5_magic_string)); +} + +int is_reiserfs_3_6(struct reiserfs_super_block *rs) +{ + return !strncmp(rs->s_v1.s_magic, reiserfs_3_6_magic_string, + strlen(reiserfs_3_6_magic_string)); +} + +int is_reiserfs_jr(struct reiserfs_super_block *rs) +{ + return !strncmp(rs->s_v1.s_magic, reiserfs_jr_magic_string, + strlen(reiserfs_jr_magic_string)); +} + +static int is_any_reiserfs_magic_string(struct reiserfs_super_block *rs) +{ + return (is_reiserfs_3_5(rs) || is_reiserfs_3_6(rs) || + is_reiserfs_jr(rs)); +} + +static int reiserfs_remount(struct super_block *s, int *flags, char *data); +static int reiserfs_statfs(struct dentry *dentry, struct kstatfs *buf); + +static int reiserfs_sync_fs(struct super_block *s, int wait) +{ + struct reiserfs_transaction_handle th; + + reiserfs_write_lock(s); + if (!journal_begin(&th, s, 1)) + if (!journal_end_sync(&th, s, 1)) + reiserfs_flush_old_commits(s); + s->s_dirt = 0; /* Even if it's not true. + * We'll loop forever in sync_supers otherwise */ + reiserfs_write_unlock(s); + return 0; +} + +static void reiserfs_write_super(struct super_block *s) +{ + reiserfs_sync_fs(s, 1); +} + +static int reiserfs_freeze(struct super_block *s) +{ + struct reiserfs_transaction_handle th; + reiserfs_write_lock(s); + if (!(s->s_flags & MS_RDONLY)) { + int err = journal_begin(&th, s, 1); + if (err) { + reiserfs_block_writes(&th); + } else { + reiserfs_prepare_for_journal(s, SB_BUFFER_WITH_SB(s), + 1); + journal_mark_dirty(&th, s, SB_BUFFER_WITH_SB(s)); + reiserfs_block_writes(&th); + journal_end_sync(&th, s, 1); + } + } + s->s_dirt = 0; + reiserfs_write_unlock(s); + return 0; +} + +static int reiserfs_unfreeze(struct super_block *s) +{ + reiserfs_allow_writes(s); + return 0; +} + +extern const struct in_core_key MAX_IN_CORE_KEY; + +/* this is used to delete "save link" when there are no items of a + file it points to. 
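+ (a "save link" is a special item recording a file with an unlink or truncate still pending, so the operation can be completed after a crash; see add_save_link() below)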
It can happen either if the unlink completed but the removal of its + "save link" did not, or if the file had both an unlink and a truncate + pending and the unlink completed first (because the key of the "save link" + protecting the unlink is bigger than the key of the "save link" which + protects the truncate), so no items were left on which to complete the + truncate */ +static int remove_save_link_only(struct super_block *s, + struct reiserfs_key *key, int oid_free) +{ + struct reiserfs_transaction_handle th; + int err; + + /* we are going to do one balancing */ + err = journal_begin(&th, s, JOURNAL_PER_BALANCE_CNT); + if (err) + return err; + + reiserfs_delete_solid_item(&th, NULL, key); + if (oid_free) + /* removals are protected by direct items */ + reiserfs_release_objectid(&th, le32_to_cpu(key->k_objectid)); + + return journal_end(&th, s, JOURNAL_PER_BALANCE_CNT); +} + +#ifdef CONFIG_QUOTA +static int reiserfs_quota_on_mount(struct super_block *, int); +#endif + +/* look for uncompleted unlinks and truncates and complete them */ +static int finish_unfinished(struct super_block *s) +{ + INITIALIZE_PATH(path); + struct cpu_key max_cpu_key, obj_key; + struct reiserfs_key save_link_key, last_inode_key; + int retval = 0; + struct item_head *ih; + struct buffer_head *bh; + int item_pos; + char *item; + int done; + struct inode *inode; + int truncate; +#ifdef CONFIG_QUOTA + int i; + int ms_active_set; +#endif + + /* compose key to look for "save" links */ + max_cpu_key.version = KEY_FORMAT_3_5; + max_cpu_key.on_disk_key.k_dir_id = ~0U; + max_cpu_key.on_disk_key.k_objectid = ~0U; + set_cpu_key_k_offset(&max_cpu_key, ~0U); + max_cpu_key.key_length = 3; + + memset(&last_inode_key, 0, sizeof(last_inode_key)); + +#ifdef CONFIG_QUOTA + /* Needed for iput() to work correctly and not trash data */ + if (s->s_flags & MS_ACTIVE) { + ms_active_set = 0; + } else { + ms_active_set = 1; + s->s_flags |= MS_ACTIVE; + } + /* Turn on quotas so that they are updated correctly */ + for (i = 0; i < MAXQUOTAS; i++) { + if (REISERFS_SB(s)->s_qf_names[i]) { + int ret = reiserfs_quota_on_mount(s, i); + if (ret < 0) + reiserfs_warning(s, "reiserfs-2500", + "cannot turn on journaled " + "quota: error %d", ret); + } + } +#endif + + done = 0; + REISERFS_SB(s)->s_is_unlinked_ok = 1; + while (!retval) { + retval = search_item(s, &max_cpu_key, &path); + if (retval != ITEM_NOT_FOUND) { + reiserfs_error(s, "vs-2140", + "search_by_key returned %d", retval); + break; + } + + bh = get_last_bh(&path); + item_pos = get_item_pos(&path); + if (item_pos != B_NR_ITEMS(bh)) { + reiserfs_warning(s, "vs-2060", + "wrong position found"); + break; + } + item_pos--; + ih = B_N_PITEM_HEAD(bh, item_pos); + + if (le32_to_cpu(ih->ih_key.k_dir_id) != MAX_KEY_OBJECTID) + /* there are no "save" links anymore */ + break; + + save_link_key = ih->ih_key; + if (is_indirect_le_ih(ih)) + truncate = 1; + else + truncate = 0; + + /* reiserfs_iget needs k_dirid and k_objectid only */ + item = B_I_PITEM(bh, ih); + obj_key.on_disk_key.k_dir_id = le32_to_cpu(*(__le32 *) item); + obj_key.on_disk_key.k_objectid = + le32_to_cpu(ih->ih_key.k_objectid); + obj_key.on_disk_key.k_offset = 0; + obj_key.on_disk_key.k_type = 0; + + pathrelse(&path); + + inode = reiserfs_iget(s, &obj_key); + if (!inode) { + /* the unlink almost completed, it just did not manage to remove + "save" link and release objectid */ + reiserfs_warning(s, "vs-2180", "iget failed for %K", + &obj_key); + retval = remove_save_link_only(s, &save_link_key, 1); + continue; + } + + if (!truncate && inode->i_nlink) { + /* file is not unlinked */ +
reiserfs_warning(s, "vs-2185", + "file %K is not unlinked", + &obj_key); + retval = remove_save_link_only(s, &save_link_key, 0); + continue; + } + vfs_dq_init(inode); + + if (truncate && S_ISDIR(inode->i_mode)) { + /* We got a truncate request for a dir which is impossible. + The only imaginable way is to execute unfinished truncate request + then boot into old kernel, remove the file and create dir with + the same key. */ + reiserfs_warning(s, "green-2101", + "impossible truncate on a " + "directory %k. Please report", + INODE_PKEY(inode)); + retval = remove_save_link_only(s, &save_link_key, 0); + truncate = 0; + iput(inode); + continue; + } + + if (truncate) { + REISERFS_I(inode)->i_flags |= + i_link_saved_truncate_mask; + /* not completed truncate found. New size was committed together + with "save" link */ + reiserfs_info(s, "Truncating %k to %Ld ..", + INODE_PKEY(inode), inode->i_size); + reiserfs_truncate_file(inode, + 0 + /*don't update modification time */ + ); + retval = remove_save_link(inode, truncate); + } else { + REISERFS_I(inode)->i_flags |= i_link_saved_unlink_mask; + /* not completed unlink (rmdir) found */ + reiserfs_info(s, "Removing %k..", INODE_PKEY(inode)); + if (memcmp(&last_inode_key, INODE_PKEY(inode), + sizeof(last_inode_key))){ + last_inode_key = *INODE_PKEY(inode); + /* removal gets completed in iput */ + retval = 0; + } else { + reiserfs_warning(s, "super-2189", "Dead loop " + "in finish_unfinished " + "detected, just remove " + "save link\n"); + retval = remove_save_link_only(s, + &save_link_key, 0); + } + } + + iput(inode); + printk("done\n"); + done++; + } + REISERFS_SB(s)->s_is_unlinked_ok = 0; + +#ifdef CONFIG_QUOTA + /* Turn quotas off */ + for (i = 0; i < MAXQUOTAS; i++) { + if (sb_dqopt(s)->files[i]) + vfs_quota_off(s, i, 0); + } + if (ms_active_set) + /* Restore the flag back */ + s->s_flags &= ~MS_ACTIVE; +#endif + pathrelse(&path); + if (done) + reiserfs_info(s, "There were %d uncompleted unlinks/truncates. " + "Completed\n", done); + return retval; +} + +/* to protect file being unlinked from getting lost we "safe" link files + being unlinked. This link will be deleted in the same transaction with last + item of file. mounting the filesystem we scan all these links and remove + files which almost got lost */ +void add_save_link(struct reiserfs_transaction_handle *th, + struct inode *inode, int truncate) +{ + INITIALIZE_PATH(path); + int retval; + struct cpu_key key; + struct item_head ih; + __le32 link; + + BUG_ON(!th->t_trans_id); + + /* file can only get one "save link" of each kind */ + RFALSE(truncate && + (REISERFS_I(inode)->i_flags & i_link_saved_truncate_mask), + "saved link already exists for truncated inode %lx", + (long)inode->i_ino); + RFALSE(!truncate && + (REISERFS_I(inode)->i_flags & i_link_saved_unlink_mask), + "saved link already exists for unlinked inode %lx", + (long)inode->i_ino); + + /* setup key of "save" link */ + key.version = KEY_FORMAT_3_5; + key.on_disk_key.k_dir_id = MAX_KEY_OBJECTID; + key.on_disk_key.k_objectid = inode->i_ino; + if (!truncate) { + /* unlink, rmdir, rename */ + set_cpu_key_k_offset(&key, 1 + inode->i_sb->s_blocksize); + set_cpu_key_k_type(&key, TYPE_DIRECT); + + /* item head of "safe" link */ + make_le_item_head(&ih, &key, key.version, + 1 + inode->i_sb->s_blocksize, TYPE_DIRECT, + 4 /*length */ , 0xffff /*free space */ ); + } else { + /* truncate */ + if (S_ISDIR(inode->i_mode)) + reiserfs_warning(inode->i_sb, "green-2102", + "Adding a truncate savelink for " + "a directory %k! 
Please report", + INODE_PKEY(inode)); + set_cpu_key_k_offset(&key, 1); + set_cpu_key_k_type(&key, TYPE_INDIRECT); + + /* item head of "safe" link */ + make_le_item_head(&ih, &key, key.version, 1, TYPE_INDIRECT, + 4 /*length */ , 0 /*free space */ ); + } + key.key_length = 3; + + /* look for its place in the tree */ + retval = search_item(inode->i_sb, &key, &path); + if (retval != ITEM_NOT_FOUND) { + if (retval != -ENOSPC) + reiserfs_error(inode->i_sb, "vs-2100", + "search_by_key (%K) returned %d", &key, + retval); + pathrelse(&path); + return; + } + + /* body of "save" link */ + link = INODE_PKEY(inode)->k_dir_id; + + /* put "save" link inot tree, don't charge quota to anyone */ + retval = + reiserfs_insert_item(th, &path, &key, &ih, NULL, (char *)&link); + if (retval) { + if (retval != -ENOSPC) + reiserfs_error(inode->i_sb, "vs-2120", + "insert_item returned %d", retval); + } else { + if (truncate) + REISERFS_I(inode)->i_flags |= + i_link_saved_truncate_mask; + else + REISERFS_I(inode)->i_flags |= i_link_saved_unlink_mask; + } +} + +/* this opens transaction unlike add_save_link */ +int remove_save_link(struct inode *inode, int truncate) +{ + struct reiserfs_transaction_handle th; + struct reiserfs_key key; + int err; + + /* we are going to do one balancing only */ + err = journal_begin(&th, inode->i_sb, JOURNAL_PER_BALANCE_CNT); + if (err) + return err; + + /* setup key of "save" link */ + key.k_dir_id = cpu_to_le32(MAX_KEY_OBJECTID); + key.k_objectid = INODE_PKEY(inode)->k_objectid; + if (!truncate) { + /* unlink, rmdir, rename */ + set_le_key_k_offset(KEY_FORMAT_3_5, &key, + 1 + inode->i_sb->s_blocksize); + set_le_key_k_type(KEY_FORMAT_3_5, &key, TYPE_DIRECT); + } else { + /* truncate */ + set_le_key_k_offset(KEY_FORMAT_3_5, &key, 1); + set_le_key_k_type(KEY_FORMAT_3_5, &key, TYPE_INDIRECT); + } + + if ((truncate && + (REISERFS_I(inode)->i_flags & i_link_saved_truncate_mask)) || + (!truncate && + (REISERFS_I(inode)->i_flags & i_link_saved_unlink_mask))) + /* don't take quota bytes from anywhere */ + reiserfs_delete_solid_item(&th, NULL, &key); + if (!truncate) { + reiserfs_release_objectid(&th, inode->i_ino); + REISERFS_I(inode)->i_flags &= ~i_link_saved_unlink_mask; + } else + REISERFS_I(inode)->i_flags &= ~i_link_saved_truncate_mask; + + return journal_end(&th, inode->i_sb, JOURNAL_PER_BALANCE_CNT); +} + +static void reiserfs_kill_sb(struct super_block *s) +{ + if (REISERFS_SB(s)) { + /* + * Force any pending inode evictions to occur now. Any + * inodes to be removed that have extended attributes + * associated with them need to clean them up before + * we can release the extended attribute root dentries. + * shrink_dcache_for_umount will BUG if we don't release + * those before it's called so ->put_super is too late. 
+ */ + shrink_dcache_sb(s); + + dput(REISERFS_SB(s)->xattr_root); + REISERFS_SB(s)->xattr_root = NULL; + dput(REISERFS_SB(s)->priv_root); + REISERFS_SB(s)->priv_root = NULL; + } + + kill_block_super(s); +} + +static void reiserfs_put_super(struct super_block *s) +{ + struct reiserfs_transaction_handle th; + th.t_trans_id = 0; + + lock_kernel(); + + if (s->s_dirt) + reiserfs_write_super(s); + + /* change file system state to current state if it was mounted with read-write permissions */ + if (!(s->s_flags & MS_RDONLY)) { + if (!journal_begin(&th, s, 10)) { + reiserfs_prepare_for_journal(s, SB_BUFFER_WITH_SB(s), + 1); + set_sb_umount_state(SB_DISK_SUPER_BLOCK(s), + REISERFS_SB(s)->s_mount_state); + journal_mark_dirty(&th, s, SB_BUFFER_WITH_SB(s)); + } + } + + /* note, journal_release checks for readonly mount, and can decide not + ** to do a journal_end + */ + journal_release(&th, s); + + reiserfs_free_bitmap_cache(s); + + brelse(SB_BUFFER_WITH_SB(s)); + + print_statistics(s); + + if (REISERFS_SB(s)->reserved_blocks != 0) { + reiserfs_warning(s, "green-2005", "reserved blocks left %d", + REISERFS_SB(s)->reserved_blocks); + } + + reiserfs_proc_info_done(s); + + kfree(s->s_fs_info); + s->s_fs_info = NULL; + + unlock_kernel(); +} + +static struct kmem_cache *reiserfs_inode_cachep; + +static struct inode *reiserfs_alloc_inode(struct super_block *sb) +{ + struct reiserfs_inode_info *ei; + ei = (struct reiserfs_inode_info *) + kmem_cache_alloc(reiserfs_inode_cachep, GFP_KERNEL); + if (!ei) + return NULL; + return &ei->vfs_inode; +} + +static void reiserfs_destroy_inode(struct inode *inode) +{ + kmem_cache_free(reiserfs_inode_cachep, REISERFS_I(inode)); +} + +static void init_once(void *foo) +{ + struct reiserfs_inode_info *ei = (struct reiserfs_inode_info *)foo; + + INIT_LIST_HEAD(&ei->i_prealloc_list); + inode_init_once(&ei->vfs_inode); +} + +static int init_inodecache(void) +{ + reiserfs_inode_cachep = kmem_cache_create("reiser_inode_cache", + sizeof(struct + reiserfs_inode_info), + 0, (SLAB_RECLAIM_ACCOUNT| + SLAB_MEM_SPREAD), + init_once); + if (reiserfs_inode_cachep == NULL) + return -ENOMEM; + return 0; +} + +static void destroy_inodecache(void) +{ + kmem_cache_destroy(reiserfs_inode_cachep); +} + +/* we don't mark inodes dirty, we just log them */ +static void reiserfs_dirty_inode(struct inode *inode) +{ + struct reiserfs_transaction_handle th; + + int err = 0; + if (inode->i_sb->s_flags & MS_RDONLY) { + reiserfs_warning(inode->i_sb, "clm-6006", + "writing inode %lu on readonly FS", + inode->i_ino); + return; + } + reiserfs_write_lock(inode->i_sb); + + /* this is really only used for atime updates, so they don't have + ** to be included in O_SYNC or fsync + */ + err = journal_begin(&th, inode->i_sb, 1); + if (err) { + reiserfs_write_unlock(inode->i_sb); + return; + } + reiserfs_update_sd(&th, inode); + journal_end(&th, inode->i_sb, 1); + reiserfs_write_unlock(inode->i_sb); +} + +#ifdef CONFIG_QUOTA +static ssize_t reiserfs_quota_write(struct super_block *, int, const char *, + size_t, loff_t); +static ssize_t reiserfs_quota_read(struct super_block *, int, char *, size_t, + loff_t); +#endif + +static const struct super_operations reiserfs_sops = { + .alloc_inode = reiserfs_alloc_inode, + .destroy_inode = reiserfs_destroy_inode, + .write_inode = reiserfs_write_inode, + .dirty_inode = reiserfs_dirty_inode, + .delete_inode = reiserfs_delete_inode, + .put_super = reiserfs_put_super, + .write_super = reiserfs_write_super, + .sync_fs = reiserfs_sync_fs, + .freeze_fs = reiserfs_freeze, + 
.unfreeze_fs = reiserfs_unfreeze, + .statfs = reiserfs_statfs, + .remount_fs = reiserfs_remount, + .show_options = generic_show_options, +#ifdef CONFIG_QUOTA + .quota_read = reiserfs_quota_read, + .quota_write = reiserfs_quota_write, +#endif +}; + +#ifdef CONFIG_QUOTA +#define QTYPE2NAME(t) ((t)==USRQUOTA?"user":"group") + +static int reiserfs_write_dquot(struct dquot *); +static int reiserfs_acquire_dquot(struct dquot *); +static int reiserfs_release_dquot(struct dquot *); +static int reiserfs_mark_dquot_dirty(struct dquot *); +static int reiserfs_write_info(struct super_block *, int); +static int reiserfs_quota_on(struct super_block *, int, int, char *, int); + +static const struct dquot_operations reiserfs_quota_operations = { + .initialize = dquot_initialize, + .drop = dquot_drop, + .alloc_space = dquot_alloc_space, + .alloc_inode = dquot_alloc_inode, + .free_space = dquot_free_space, + .free_inode = dquot_free_inode, + .transfer = dquot_transfer, + .write_dquot = reiserfs_write_dquot, + .acquire_dquot = reiserfs_acquire_dquot, + .release_dquot = reiserfs_release_dquot, + .mark_dirty = reiserfs_mark_dquot_dirty, + .write_info = reiserfs_write_info, + .alloc_dquot = dquot_alloc, + .destroy_dquot = dquot_destroy, +}; + +static const struct quotactl_ops reiserfs_qctl_operations = { + .quota_on = reiserfs_quota_on, + .quota_off = vfs_quota_off, + .quota_sync = vfs_quota_sync, + .get_info = vfs_get_dqinfo, + .set_info = vfs_set_dqinfo, + .get_dqblk = vfs_get_dqblk, + .set_dqblk = vfs_set_dqblk, +}; +#endif + +static const struct export_operations reiserfs_export_ops = { + .encode_fh = reiserfs_encode_fh, + .fh_to_dentry = reiserfs_fh_to_dentry, + .fh_to_parent = reiserfs_fh_to_parent, + .get_parent = reiserfs_get_parent, +}; + +/* this struct is used in reiserfs_getopt () for containing the value for those + mount options that have values rather than being toggles. */ +typedef struct { + char *value; + int setmask; /* bitmask which is to set on mount_options bitmask when this + value is found, 0 is no bits are to be changed. */ + int clrmask; /* bitmask which is to clear on mount_options bitmask when this + value is found, 0 is no bits are to be changed. This is + applied BEFORE setmask */ +} arg_desc_t; + +/* Set this bit in arg_required to allow empty arguments */ +#define REISERFS_OPT_ALLOWEMPTY 31 + +/* this struct is used in reiserfs_getopt() for describing the set of reiserfs + mount options */ +typedef struct { + char *option_name; + int arg_required; /* 0 if argument is not required, not 0 otherwise */ + const arg_desc_t *values; /* list of values accepted by an option */ + int setmask; /* bitmask which is to set on mount_options bitmask when this + value is found, 0 is no bits are to be changed. */ + int clrmask; /* bitmask which is to clear on mount_options bitmask when this + value is found, 0 is no bits are to be changed. 
This is + applied BEFORE setmask */ +} opt_desc_t; + +/* possible values for -o data= */ +static const arg_desc_t logging_mode[] = { + {"ordered", 1 << REISERFS_DATA_ORDERED, + (1 << REISERFS_DATA_LOG | 1 << REISERFS_DATA_WRITEBACK)}, + {"journal", 1 << REISERFS_DATA_LOG, + (1 << REISERFS_DATA_ORDERED | 1 << REISERFS_DATA_WRITEBACK)}, + {"writeback", 1 << REISERFS_DATA_WRITEBACK, + (1 << REISERFS_DATA_ORDERED | 1 << REISERFS_DATA_LOG)}, + {.value = NULL} +}; + +/* possible values for -o barrier= */ +static const arg_desc_t barrier_mode[] = { + {"none", 1 << REISERFS_BARRIER_NONE, 1 << REISERFS_BARRIER_FLUSH}, + {"flush", 1 << REISERFS_BARRIER_FLUSH, 1 << REISERFS_BARRIER_NONE}, + {.value = NULL} +}; + +/* possible values for "-o block-allocator=" and bits which are to be set in + s_mount_opt of reiserfs specific part of in-core super block */ +static const arg_desc_t balloc[] = { + {"noborder", 1 << REISERFS_NO_BORDER, 0}, + {"border", 0, 1 << REISERFS_NO_BORDER}, + {"no_unhashed_relocation", 1 << REISERFS_NO_UNHASHED_RELOCATION, 0}, + {"hashed_relocation", 1 << REISERFS_HASHED_RELOCATION, 0}, + {"test4", 1 << REISERFS_TEST4, 0}, + {"notest4", 0, 1 << REISERFS_TEST4}, + {NULL, 0, 0} +}; + +static const arg_desc_t tails[] = { + {"on", 1 << REISERFS_LARGETAIL, 1 << REISERFS_SMALLTAIL}, + {"off", 0, (1 << REISERFS_LARGETAIL) | (1 << REISERFS_SMALLTAIL)}, + {"small", 1 << REISERFS_SMALLTAIL, 1 << REISERFS_LARGETAIL}, + {NULL, 0, 0} +}; + +static const arg_desc_t error_actions[] = { + {"panic", 1 << REISERFS_ERROR_PANIC, + (1 << REISERFS_ERROR_RO | 1 << REISERFS_ERROR_CONTINUE)}, + {"ro-remount", 1 << REISERFS_ERROR_RO, + (1 << REISERFS_ERROR_PANIC | 1 << REISERFS_ERROR_CONTINUE)}, +#ifdef REISERFS_JOURNAL_ERROR_ALLOWS_NO_LOG + {"continue", 1 << REISERFS_ERROR_CONTINUE, + (1 << REISERFS_ERROR_PANIC | 1 << REISERFS_ERROR_RO)}, +#endif + {NULL, 0, 0}, +}; + +/* process exactly one option from the list *cur - string containing the mount options + opts - array of options which are accepted + opt_arg - if an option is found and requires an argument, and if it is specified + in the input - a pointer to the argument is stored here + bit_flags - if the option requires setting a certain bit - it is set here + return -1 if an unknown option is found, opt->arg_required otherwise */ +static int reiserfs_getopt(struct super_block *s, char **cur, opt_desc_t * opts, + char **opt_arg, unsigned long *bit_flags) +{ + char *p; + /* foo=bar, + ^ ^ ^ + | | +-- option_end + | +-- arg_start + +-- option_start + */ + const opt_desc_t *opt; + const arg_desc_t *arg; + + p = *cur; + + /* assume argument cannot contain commas */ + *cur = strchr(p, ','); + if (*cur) { + *(*cur) = '\0'; + (*cur)++; + } + + if (!strncmp(p, "alloc=", 6)) { + /* Ugly special case, probably we should redo options parser so that + it can understand several arguments for some options, also so that + it can fill several bitfields with option values.
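+ e.g. something like "-o alloc=concentrating_formatted_nodes:displacing_large_files" hands reiserfs_parse_alloc_options() two colon-separated sub-options in one argument.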
*/ + if (reiserfs_parse_alloc_options(s, p + 6)) { + return -1; + } else { + return 0; + } + } + + /* for every option in the list */ + for (opt = opts; opt->option_name; opt++) { + if (!strncmp(p, opt->option_name, strlen(opt->option_name))) { + if (bit_flags) { + if (opt->clrmask == + (1 << REISERFS_UNSUPPORTED_OPT)) + reiserfs_warning(s, "super-6500", + "%s not supported.\n", + p); + else + *bit_flags &= ~opt->clrmask; + if (opt->setmask == + (1 << REISERFS_UNSUPPORTED_OPT)) + reiserfs_warning(s, "super-6501", + "%s not supported.\n", + p); + else + *bit_flags |= opt->setmask; + } + break; + } + } + if (!opt->option_name) { + reiserfs_warning(s, "super-6502", + "unknown mount option \"%s\"", p); + return -1; + } + + p += strlen(opt->option_name); + switch (*p) { + case '=': + if (!opt->arg_required) { + reiserfs_warning(s, "super-6503", + "the option \"%s\" does not " + "require an argument\n", + opt->option_name); + return -1; + } + break; + + case 0: + if (opt->arg_required) { + reiserfs_warning(s, "super-6504", + "the option \"%s\" requires an " + "argument\n", opt->option_name); + return -1; + } + break; + default: + reiserfs_warning(s, "super-6505", + "head of option \"%s\" is only correct\n", + opt->option_name); + return -1; + } + + /* move to the argument, or to next option if argument is not required */ + p++; + + if (opt->arg_required + && !(opt->arg_required & (1 << REISERFS_OPT_ALLOWEMPTY)) + && !strlen(p)) { + /* this catches "option=," if not allowed */ + reiserfs_warning(s, "super-6506", + "empty argument for \"%s\"\n", + opt->option_name); + return -1; + } + + if (!opt->values) { + /* no value list: store a pointer to the raw argument in *opt_arg */ + *opt_arg = p; + return opt->arg_required & ~(1 << REISERFS_OPT_ALLOWEMPTY); + } + + /* values possible for this option are listed in opt->values */ + for (arg = opt->values; arg->value; arg++) { + if (!strcmp(p, arg->value)) { + if (bit_flags) { + *bit_flags &= ~arg->clrmask; + *bit_flags |= arg->setmask; + } + return opt->arg_required; + } + } + + reiserfs_warning(s, "super-6506", + "bad value \"%s\" for option \"%s\"\n", p, + opt->option_name); + return -1; +} + +/* returns 0 if something is wrong in option string, 1 - otherwise */ +static int reiserfs_parse_options(struct super_block *s, char *options, /* string given via mount's -o */ + unsigned long *mount_options, + /* after the parsing phase, contains the + collection of bitflags defining what + mount options were selected.
*/ + unsigned long *blocks, /* strtol-ed from NNN of resize=NNN */ + char **jdev_name, + unsigned int *commit_max_age, + char **qf_names, + unsigned int *qfmt) +{ + int c; + char *arg = NULL; + char *pos; + opt_desc_t opts[] = { + /* Compatibility stuff, so that -o notail for old setups still work */ + {"tails",.arg_required = 't',.values = tails}, + {"notail",.clrmask = + (1 << REISERFS_LARGETAIL) | (1 << REISERFS_SMALLTAIL)}, + {"conv",.setmask = 1 << REISERFS_CONVERT}, + {"attrs",.setmask = 1 << REISERFS_ATTRS}, + {"noattrs",.clrmask = 1 << REISERFS_ATTRS}, + {"expose_privroot", .setmask = 1 << REISERFS_EXPOSE_PRIVROOT}, +#ifdef CONFIG_REISERFS_FS_XATTR + {"user_xattr",.setmask = 1 << REISERFS_XATTRS_USER}, + {"nouser_xattr",.clrmask = 1 << REISERFS_XATTRS_USER}, +#else + {"user_xattr",.setmask = 1 << REISERFS_UNSUPPORTED_OPT}, + {"nouser_xattr",.clrmask = 1 << REISERFS_UNSUPPORTED_OPT}, +#endif +#ifdef CONFIG_REISERFS_FS_POSIX_ACL + {"acl",.setmask = 1 << REISERFS_POSIXACL}, + {"noacl",.clrmask = 1 << REISERFS_POSIXACL}, +#else + {"acl",.setmask = 1 << REISERFS_UNSUPPORTED_OPT}, + {"noacl",.clrmask = 1 << REISERFS_UNSUPPORTED_OPT}, +#endif + {.option_name = "nolog"}, + {"replayonly",.setmask = 1 << REPLAYONLY}, + {"block-allocator",.arg_required = 'a',.values = balloc}, + {"data",.arg_required = 'd',.values = logging_mode}, + {"barrier",.arg_required = 'b',.values = barrier_mode}, + {"resize",.arg_required = 'r',.values = NULL}, + {"jdev",.arg_required = 'j',.values = NULL}, + {"nolargeio",.arg_required = 'w',.values = NULL}, + {"commit",.arg_required = 'c',.values = NULL}, + {"usrquota",.setmask = 1 << REISERFS_QUOTA}, + {"grpquota",.setmask = 1 << REISERFS_QUOTA}, + {"noquota",.clrmask = 1 << REISERFS_QUOTA}, + {"errors",.arg_required = 'e',.values = error_actions}, + {"usrjquota",.arg_required = + 'u' | (1 << REISERFS_OPT_ALLOWEMPTY),.values = NULL}, + {"grpjquota",.arg_required = + 'g' | (1 << REISERFS_OPT_ALLOWEMPTY),.values = NULL}, + {"jqfmt",.arg_required = 'f',.values = NULL}, + {.option_name = NULL} + }; + + *blocks = 0; + if (!options || !*options) + /* use default configuration: create tails, journaling on, no + conversion to newest format */ + return 1; + + for (pos = options; pos;) { + c = reiserfs_getopt(s, &pos, opts, &arg, mount_options); + if (c == -1) + /* wrong option is given */ + return 0; + + if (c == 'r') { + char *p; + + p = NULL; + /* "resize=NNN" or "resize=auto" */ + + if (!strcmp(arg, "auto")) { + /* From JFS code, to auto-get the size. */ + *blocks = + s->s_bdev->bd_inode->i_size >> s-> + s_blocksize_bits; + } else { + *blocks = simple_strtoul(arg, &p, 0); + if (*p != '\0') { + /* NNN does not look like a number */ + reiserfs_warning(s, "super-6507", + "bad value %s for " + "-oresize\n", arg); + return 0; + } + } + } + + if (c == 'c') { + char *p = NULL; + unsigned long val = simple_strtoul(arg, &p, 0); + /* commit=NNN (time in seconds) */ + if (*p != '\0' || val >= (unsigned int)-1) { + reiserfs_warning(s, "super-6508", + "bad value %s for -ocommit\n", + arg); + return 0; + } + *commit_max_age = (unsigned int)val; + } + + if (c == 'w') { + reiserfs_warning(s, "super-6509", "nolargeio option " + "is no longer supported"); + return 0; + } + + if (c == 'j') { + if (arg && *arg && jdev_name) { + if (*jdev_name) { //Hm, already assigned? 
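+ /* a second jdev= in the same option string is rejected rather than silently overriding the first */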
+ reiserfs_warning(s, "super-6510", + "journal device was " + "already specified to " + "be %s", *jdev_name); + return 0; + } + *jdev_name = arg; + } + } +#ifdef CONFIG_QUOTA + if (c == 'u' || c == 'g') { + int qtype = c == 'u' ? USRQUOTA : GRPQUOTA; + + if (sb_any_quota_loaded(s) && + (!*arg != !REISERFS_SB(s)->s_qf_names[qtype])) { + reiserfs_warning(s, "super-6511", + "cannot change journaled " + "quota options when quota " + "turned on."); + return 0; + } + if (*arg) { /* Some filename specified? */ + if (REISERFS_SB(s)->s_qf_names[qtype] + && strcmp(REISERFS_SB(s)->s_qf_names[qtype], + arg)) { + reiserfs_warning(s, "super-6512", + "%s quota file " + "already specified.", + QTYPE2NAME(qtype)); + return 0; + } + if (strchr(arg, '/')) { + reiserfs_warning(s, "super-6513", + "quotafile must be " + "on filesystem root."); + return 0; + } + qf_names[qtype] = + kmalloc(strlen(arg) + 1, GFP_KERNEL); + if (!qf_names[qtype]) { + reiserfs_warning(s, "reiserfs-2502", + "not enough memory " + "for storing " + "quotafile name."); + return 0; + } + strcpy(qf_names[qtype], arg); + *mount_options |= 1 << REISERFS_QUOTA; + } else { + if (qf_names[qtype] != + REISERFS_SB(s)->s_qf_names[qtype]) + kfree(qf_names[qtype]); + qf_names[qtype] = NULL; + } + } + if (c == 'f') { + if (!strcmp(arg, "vfsold")) + *qfmt = QFMT_VFS_OLD; + else if (!strcmp(arg, "vfsv0")) + *qfmt = QFMT_VFS_V0; + else { + reiserfs_warning(s, "super-6514", + "unknown quota format " + "specified."); + return 0; + } + if (sb_any_quota_loaded(s) && + *qfmt != REISERFS_SB(s)->s_jquota_fmt) { + reiserfs_warning(s, "super-6515", + "cannot change journaled " + "quota options when quota " + "turned on."); + return 0; + } + } +#else + if (c == 'u' || c == 'g' || c == 'f') { + reiserfs_warning(s, "reiserfs-2503", "journaled " + "quota options not supported."); + return 0; + } +#endif + } + +#ifdef CONFIG_QUOTA + if (!REISERFS_SB(s)->s_jquota_fmt && !*qfmt + && (qf_names[USRQUOTA] || qf_names[GRPQUOTA])) { + reiserfs_warning(s, "super-6515", + "journaled quota format not specified."); + return 0; + } + /* This checking is not precise wrt the quota type but for our purposes it is sufficient */ + if (!(*mount_options & (1 << REISERFS_QUOTA)) + && sb_any_quota_loaded(s)) { + reiserfs_warning(s, "super-6516", "quota options must " + "be present when quota is turned on."); + return 0; + } +#endif + + return 1; +} + +static void switch_data_mode(struct super_block *s, unsigned long mode) +{ + REISERFS_SB(s)->s_mount_opt &= ~((1 << REISERFS_DATA_LOG) | + (1 << REISERFS_DATA_ORDERED) | + (1 << REISERFS_DATA_WRITEBACK)); + REISERFS_SB(s)->s_mount_opt |= (1 << mode); +} + +static void handle_data_mode(struct super_block *s, unsigned long mount_options) +{ + if (mount_options & (1 << REISERFS_DATA_LOG)) { + if (!reiserfs_data_log(s)) { + switch_data_mode(s, REISERFS_DATA_LOG); + reiserfs_info(s, "switching to journaled data mode\n"); + } + } else if (mount_options & (1 << REISERFS_DATA_ORDERED)) { + if (!reiserfs_data_ordered(s)) { + switch_data_mode(s, REISERFS_DATA_ORDERED); + reiserfs_info(s, "switching to ordered data mode\n"); + } + } else if (mount_options & (1 << REISERFS_DATA_WRITEBACK)) { + if (!reiserfs_data_writeback(s)) { + switch_data_mode(s, REISERFS_DATA_WRITEBACK); + reiserfs_info(s, "switching to writeback data mode\n"); + } + } +} + +static void handle_barrier_mode(struct super_block *s, unsigned long bits) +{ + int flush = (1 << REISERFS_BARRIER_FLUSH); + int none = (1 << REISERFS_BARRIER_NONE); + int all_barrier = flush | none; + + if 
(bits & all_barrier) { + REISERFS_SB(s)->s_mount_opt &= ~all_barrier; + if (bits & flush) { + REISERFS_SB(s)->s_mount_opt |= flush; + printk("reiserfs: enabling write barrier flush mode\n"); + } else if (bits & none) { + REISERFS_SB(s)->s_mount_opt |= none; + printk("reiserfs: write barriers turned off\n"); + } + } +} + +static void handle_attrs(struct super_block *s) +{ + struct reiserfs_super_block *rs = SB_DISK_SUPER_BLOCK(s); + + if (reiserfs_attrs(s)) { + if (old_format_only(s)) { + reiserfs_warning(s, "super-6517", "cannot support " + "attributes on 3.5.x disk format"); + REISERFS_SB(s)->s_mount_opt &= ~(1 << REISERFS_ATTRS); + return; + } + if (!(le32_to_cpu(rs->s_flags) & reiserfs_attrs_cleared)) { + reiserfs_warning(s, "super-6518", "cannot support " + "attributes until flag is set in " + "super-block"); + REISERFS_SB(s)->s_mount_opt &= ~(1 << REISERFS_ATTRS); + } + } +} + +#ifdef CONFIG_QUOTA +static void handle_quota_files(struct super_block *s, char **qf_names, + unsigned int *qfmt) +{ + int i; + + for (i = 0; i < MAXQUOTAS; i++) { + if (qf_names[i] != REISERFS_SB(s)->s_qf_names[i]) + kfree(REISERFS_SB(s)->s_qf_names[i]); + REISERFS_SB(s)->s_qf_names[i] = qf_names[i]; + } + if (*qfmt) + REISERFS_SB(s)->s_jquota_fmt = *qfmt; +} +#endif + +static int reiserfs_remount(struct super_block *s, int *mount_flags, char *arg) +{ + struct reiserfs_super_block *rs; + struct reiserfs_transaction_handle th; + unsigned long blocks; + unsigned long mount_options = REISERFS_SB(s)->s_mount_opt; + unsigned long safe_mask = 0; + unsigned int commit_max_age = (unsigned int)-1; + struct reiserfs_journal *journal = SB_JOURNAL(s); + char *new_opts = kstrdup(arg, GFP_KERNEL); + int err; + char *qf_names[MAXQUOTAS]; + unsigned int qfmt = 0; +#ifdef CONFIG_QUOTA + int i; + + memcpy(qf_names, REISERFS_SB(s)->s_qf_names, sizeof(qf_names)); +#endif + + lock_kernel(); + rs = SB_DISK_SUPER_BLOCK(s); + + if (!reiserfs_parse_options + (s, arg, &mount_options, &blocks, NULL, &commit_max_age, + qf_names, &qfmt)) { +#ifdef CONFIG_QUOTA + for (i = 0; i < MAXQUOTAS; i++) + if (qf_names[i] != REISERFS_SB(s)->s_qf_names[i]) + kfree(qf_names[i]); +#endif + err = -EINVAL; + goto out_err; + } +#ifdef CONFIG_QUOTA + handle_quota_files(s, qf_names, &qfmt); +#endif + + handle_attrs(s); + + /* Add options that are safe here */ + safe_mask |= 1 << REISERFS_SMALLTAIL; + safe_mask |= 1 << REISERFS_LARGETAIL; + safe_mask |= 1 << REISERFS_NO_BORDER; + safe_mask |= 1 << REISERFS_NO_UNHASHED_RELOCATION; + safe_mask |= 1 << REISERFS_HASHED_RELOCATION; + safe_mask |= 1 << REISERFS_TEST4; + safe_mask |= 1 << REISERFS_ATTRS; + safe_mask |= 1 << REISERFS_XATTRS_USER; + safe_mask |= 1 << REISERFS_POSIXACL; + safe_mask |= 1 << REISERFS_BARRIER_FLUSH; + safe_mask |= 1 << REISERFS_BARRIER_NONE; + safe_mask |= 1 << REISERFS_ERROR_RO; + safe_mask |= 1 << REISERFS_ERROR_CONTINUE; + safe_mask |= 1 << REISERFS_ERROR_PANIC; + safe_mask |= 1 << REISERFS_QUOTA; + + /* Update the bitmask, taking care to keep + * the bits we're not allowed to change here */ + REISERFS_SB(s)->s_mount_opt = + (REISERFS_SB(s)-> + s_mount_opt & ~safe_mask) | (mount_options & safe_mask); + + if (commit_max_age != 0 && commit_max_age != (unsigned int)-1) { + journal->j_max_commit_age = commit_max_age; + journal->j_max_trans_age = commit_max_age; + } else if (commit_max_age == 0) { + /* 0 means restore defaults. 
*/ + journal->j_max_commit_age = journal->j_default_max_commit_age; + journal->j_max_trans_age = JOURNAL_MAX_TRANS_AGE; + } + + if (blocks) { + err = reiserfs_resize(s, blocks); + if (err != 0) + goto out_err; + } + + if (*mount_flags & MS_RDONLY) { + reiserfs_xattr_init(s, *mount_flags); + /* remount read-only */ + if (s->s_flags & MS_RDONLY) + /* it is read-only already */ + goto out_ok; + /* try to remount file system with read-only permissions */ + if (sb_umount_state(rs) == REISERFS_VALID_FS + || REISERFS_SB(s)->s_mount_state != REISERFS_VALID_FS) { + goto out_ok; + } + + err = journal_begin(&th, s, 10); + if (err) + goto out_err; + + /* Mounting a rw partition read-only. */ + reiserfs_prepare_for_journal(s, SB_BUFFER_WITH_SB(s), 1); + set_sb_umount_state(rs, REISERFS_SB(s)->s_mount_state); + journal_mark_dirty(&th, s, SB_BUFFER_WITH_SB(s)); + } else { + /* remount read-write */ + if (!(s->s_flags & MS_RDONLY)) { + reiserfs_xattr_init(s, *mount_flags); + goto out_ok; /* We are read-write already */ + } + + if (reiserfs_is_journal_aborted(journal)) { + err = journal->j_errno; + goto out_err; + } + + handle_data_mode(s, mount_options); + handle_barrier_mode(s, mount_options); + REISERFS_SB(s)->s_mount_state = sb_umount_state(rs); + s->s_flags &= ~MS_RDONLY; /* now it is safe to call journal_begin */ + err = journal_begin(&th, s, 10); + if (err) + goto out_err; + + /* Mount a partition which is read-only, read-write */ + reiserfs_prepare_for_journal(s, SB_BUFFER_WITH_SB(s), 1); + REISERFS_SB(s)->s_mount_state = sb_umount_state(rs); + s->s_flags &= ~MS_RDONLY; + set_sb_umount_state(rs, REISERFS_ERROR_FS); + if (!old_format_only(s)) + set_sb_mnt_count(rs, sb_mnt_count(rs) + 1); + /* mark_buffer_dirty (SB_BUFFER_WITH_SB (s), 1); */ + journal_mark_dirty(&th, s, SB_BUFFER_WITH_SB(s)); + REISERFS_SB(s)->s_mount_state = REISERFS_VALID_FS; + } + /* this will force a full flush of all journal lists */ + SB_JOURNAL(s)->j_must_wait = 1; + err = journal_end(&th, s, 10); + if (err) + goto out_err; + s->s_dirt = 0; + + if (!(*mount_flags & MS_RDONLY)) { + finish_unfinished(s); + reiserfs_xattr_init(s, *mount_flags); + } + +out_ok: + replace_mount_options(s, new_opts); + unlock_kernel(); + return 0; + +out_err: + kfree(new_opts); + unlock_kernel(); + return err; +} + +static int read_super_block(struct super_block *s, int offset) +{ + struct buffer_head *bh; + struct reiserfs_super_block *rs; + int fs_blocksize; + + bh = sb_bread(s, offset / s->s_blocksize); + if (!bh) { + reiserfs_warning(s, "sh-2006", + "bread failed (dev %s, block %lu, size %lu)", + reiserfs_bdevname(s), offset / s->s_blocksize, + s->s_blocksize); + return 1; + } + + rs = (struct reiserfs_super_block *)bh->b_data; + if (!is_any_reiserfs_magic_string(rs)) { + brelse(bh); + return 1; + } + // + // ok, reiserfs signature (old or new) found in at the given offset + // + fs_blocksize = sb_blocksize(rs); + brelse(bh); + sb_set_blocksize(s, fs_blocksize); + + bh = sb_bread(s, offset / s->s_blocksize); + if (!bh) { + reiserfs_warning(s, "sh-2007", + "bread failed (dev %s, block %lu, size %lu)", + reiserfs_bdevname(s), offset / s->s_blocksize, + s->s_blocksize); + return 1; + } + + rs = (struct reiserfs_super_block *)bh->b_data; + if (sb_blocksize(rs) != s->s_blocksize) { + reiserfs_warning(s, "sh-2011", "can't find a reiserfs " + "filesystem on (dev %s, block %Lu, size %lu)", + reiserfs_bdevname(s), + (unsigned long long)bh->b_blocknr, + s->s_blocksize); + brelse(bh); + return 1; + } + + if (rs->s_v1.s_root_block == cpu_to_le32(-1)) { + 
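+ /* a root block of -1 marks an unfinished reiserfsck --rebuild-tree run */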
brelse(bh); + reiserfs_warning(s, "super-6519", "Unfinished reiserfsck " + "--rebuild-tree run detected. Please run\n" + "reiserfsck --rebuild-tree and wait for a " + "completion. If that fails\n" + "get newer reiserfsprogs package"); + return 1; + } + + SB_BUFFER_WITH_SB(s) = bh; + SB_DISK_SUPER_BLOCK(s) = rs; + + if (is_reiserfs_jr(rs)) { + /* magic is of non-standard journal filesystem, look at s_version to + find which format is in use */ + if (sb_version(rs) == REISERFS_VERSION_2) + reiserfs_info(s, "found reiserfs format \"3.6\"" + " with non-standard journal\n"); + else if (sb_version(rs) == REISERFS_VERSION_1) + reiserfs_info(s, "found reiserfs format \"3.5\"" + " with non-standard journal\n"); + else { + reiserfs_warning(s, "sh-2012", "found unknown " + "format \"%u\" of reiserfs with " + "non-standard magic", sb_version(rs)); + return 1; + } + } else + /* s_version of standard format may contain incorrect information, + so we just look at the magic string */ + reiserfs_info(s, + "found reiserfs format \"%s\" with standard journal\n", + is_reiserfs_3_5(rs) ? "3.5" : "3.6"); + + s->s_op = &reiserfs_sops; + s->s_export_op = &reiserfs_export_ops; +#ifdef CONFIG_QUOTA + s->s_qcop = &reiserfs_qctl_operations; + s->dq_op = &reiserfs_quota_operations; +#endif + + /* new format is limited by the 32 bit wide i_blocks field, want to + ** be one full block below that. + */ + s->s_maxbytes = (512LL << 32) - s->s_blocksize; + return 0; +} + +/* after journal replay, reread all bitmap and super blocks */ +static int reread_meta_blocks(struct super_block *s) +{ + ll_rw_block(READ, 1, &(SB_BUFFER_WITH_SB(s))); + wait_on_buffer(SB_BUFFER_WITH_SB(s)); + if (!buffer_uptodate(SB_BUFFER_WITH_SB(s))) { + reiserfs_warning(s, "reiserfs-2504", "error reading the super"); + return 1; + } + + return 0; +} + +///////////////////////////////////////////////////// +// hash detection stuff + +// if root directory is empty - we set default - Yura's - hash and +// warn about it +// FIXME: we look for only one name in a directory. If tea and yura +// both have the same value - we ask user to send report to the +// mailing list +static __u32 find_hash_out(struct super_block *s) +{ + int retval; + struct inode *inode; + struct cpu_key key; + INITIALIZE_PATH(path); + struct reiserfs_dir_entry de; + __u32 hash = DEFAULT_HASH; + + inode = s->s_root->d_inode; + + do { // Some serious "goto"-hater was there ;) + u32 teahash, r5hash, yurahash; + + make_cpu_key(&key, inode, ~0, TYPE_DIRENTRY, 3); + retval = search_by_entry_key(s, &key, &path, &de); + if (retval == IO_ERROR) { + pathrelse(&path); + return UNSET_HASH; + } + if (retval == NAME_NOT_FOUND) + de.de_entry_num--; + set_de_name_and_namelen(&de); + if (deh_offset(&(de.de_deh[de.de_entry_num])) == DOT_DOT_OFFSET) { + /* allow override in this case */ + if (reiserfs_rupasov_hash(s)) { + hash = YURA_HASH; + } + reiserfs_info(s, "FS seems to be empty, autodetect " + "is using the default hash\n"); + break; + } + r5hash = GET_HASH_VALUE(r5_hash(de.de_name, de.de_namelen)); + teahash = GET_HASH_VALUE(keyed_hash(de.de_name, de.de_namelen)); + yurahash = GET_HASH_VALUE(yura_hash(de.de_name, de.de_namelen)); + if (((teahash == r5hash) + && + (GET_HASH_VALUE(deh_offset(&(de.de_deh[de.de_entry_num]))) + == r5hash)) || ((teahash == yurahash) + && (yurahash == + GET_HASH_VALUE(deh_offset + (& + (de. + de_deh[de.
+ de_entry_num]))))) + || ((r5hash == yurahash) + && (yurahash == + GET_HASH_VALUE(deh_offset + (&(de.de_deh[de.de_entry_num])))))) { + reiserfs_warning(s, "reiserfs-2506", "Unable to " + "automatically detect hash function. " + "Please mount with -o " + "hash={tea,rupasov,r5}"); + hash = UNSET_HASH; + break; + } + if (GET_HASH_VALUE(deh_offset(&(de.de_deh[de.de_entry_num]))) == + yurahash) + hash = YURA_HASH; + else if (GET_HASH_VALUE + (deh_offset(&(de.de_deh[de.de_entry_num]))) == teahash) + hash = TEA_HASH; + else if (GET_HASH_VALUE + (deh_offset(&(de.de_deh[de.de_entry_num]))) == r5hash) + hash = R5_HASH; + else { + reiserfs_warning(s, "reiserfs-2506", + "Unrecognised hash function"); + hash = UNSET_HASH; + } + } while (0); + + pathrelse(&path); + return hash; +} + +// finds out which hash names are sorted with +static int what_hash(struct super_block *s) +{ + __u32 code; + + code = sb_hash_function_code(SB_DISK_SUPER_BLOCK(s)); + + /* reiserfs_hash_detect() == true if any of the hash mount options + ** were used. We must check them to make sure the user isn't + ** using a bad hash value + */ + if (code == UNSET_HASH || reiserfs_hash_detect(s)) + code = find_hash_out(s); + + if (code != UNSET_HASH && reiserfs_hash_detect(s)) { + /* detection has found the hash, and we must check against the + ** mount options + */ + if (reiserfs_rupasov_hash(s) && code != YURA_HASH) { + reiserfs_warning(s, "reiserfs-2507", + "Error, %s hash detected, " + "unable to force rupasov hash", + reiserfs_hashname(code)); + code = UNSET_HASH; + } else if (reiserfs_tea_hash(s) && code != TEA_HASH) { + reiserfs_warning(s, "reiserfs-2508", + "Error, %s hash detected, " + "unable to force tea hash", + reiserfs_hashname(code)); + code = UNSET_HASH; + } else if (reiserfs_r5_hash(s) && code != R5_HASH) { + reiserfs_warning(s, "reiserfs-2509", + "Error, %s hash detected, " + "unable to force r5 hash", + reiserfs_hashname(code)); + code = UNSET_HASH; + } + } else { + /* find_hash_out was not called or could not determine the hash */ + if (reiserfs_rupasov_hash(s)) { + code = YURA_HASH; + } else if (reiserfs_tea_hash(s)) { + code = TEA_HASH; + } else if (reiserfs_r5_hash(s)) { + code = R5_HASH; + } + } + + /* if we are mounted RW, and we have a new valid hash code, update + ** the super + */ + if (code != UNSET_HASH && + !(s->s_flags & MS_RDONLY) && + code != sb_hash_function_code(SB_DISK_SUPER_BLOCK(s))) { + set_sb_hash_function_code(SB_DISK_SUPER_BLOCK(s), code); + } + return code; +} + +// return pointer to appropriate function +static hashf_t hash_function(struct super_block *s) +{ + switch (what_hash(s)) { + case TEA_HASH: + reiserfs_info(s, "Using tea hash to sort names\n"); + return keyed_hash; + case YURA_HASH: + reiserfs_info(s, "Using rupasov hash to sort names\n"); + return yura_hash; + case R5_HASH: + reiserfs_info(s, "Using r5 hash to sort names\n"); + return r5_hash; + } + return NULL; +} + +// this is used to set up correct value for old partitions +static int function2code(hashf_t func) +{ + if (func == keyed_hash) + return TEA_HASH; + if (func == yura_hash) + return YURA_HASH; + if (func == r5_hash) + return R5_HASH; + + BUG(); // should never happen + + return 0; +} + +#define SWARN(silent, s, id, ...) 
\ + if (!(silent)) \ + reiserfs_warning(s, id, __VA_ARGS__) + +static int reiserfs_fill_super(struct super_block *s, void *data, int silent) +{ + struct inode *root_inode; + struct reiserfs_transaction_handle th; + int old_format = 0; + unsigned long blocks; + unsigned int commit_max_age = 0; + int jinit_done = 0; + struct reiserfs_iget_args args; + struct reiserfs_super_block *rs; + char *jdev_name; + struct reiserfs_sb_info *sbi; + int errval = -EINVAL; + char *qf_names[MAXQUOTAS] = {}; + unsigned int qfmt = 0; + + save_mount_options(s, data); + + sbi = kzalloc(sizeof(struct reiserfs_sb_info), GFP_KERNEL); + if (!sbi) { + errval = -ENOMEM; + goto error; + } + s->s_fs_info = sbi; + /* Set default values for options: non-aggressive tails, RO on errors */ + REISERFS_SB(s)->s_mount_opt |= (1 << REISERFS_SMALLTAIL); + REISERFS_SB(s)->s_mount_opt |= (1 << REISERFS_ERROR_RO); + /* no preallocation minimum, be smart in + reiserfs_file_write instead */ + REISERFS_SB(s)->s_alloc_options.preallocmin = 0; + /* Preallocate by 16 blocks (17-1) at once */ + REISERFS_SB(s)->s_alloc_options.preallocsize = 17; + /* setup default block allocator options */ + reiserfs_init_alloc_options(s); + + jdev_name = NULL; + if (reiserfs_parse_options + (s, (char *)data, &(sbi->s_mount_opt), &blocks, &jdev_name, + &commit_max_age, qf_names, &qfmt) == 0) { + goto error; + } +#ifdef CONFIG_QUOTA + handle_quota_files(s, qf_names, &qfmt); +#endif + + if (blocks) { + SWARN(silent, s, "jmacd-7", "resize option for remount only"); + goto error; + } + + /* try old format (undistributed bitmap, super block in 8-th 1k block of a device) */ + if (!read_super_block(s, REISERFS_OLD_DISK_OFFSET_IN_BYTES)) + old_format = 1; + /* try new format (64-th 1k block), which can contain reiserfs super block */ + else if (read_super_block(s, REISERFS_DISK_OFFSET_IN_BYTES)) { + SWARN(silent, s, "sh-2021", "can not find reiserfs on %s", + reiserfs_bdevname(s)); + goto error; + } + + rs = SB_DISK_SUPER_BLOCK(s); + /* Let's do basic sanity check to verify that underlying device is not + smaller than the filesystem. If the check fails then abort and scream, + because bad stuff will happen otherwise. 
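+ (e.g. a filesystem of 1048576 4KiB blocks needs a device of at least 4 GiB)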
*/ + if (s->s_bdev && s->s_bdev->bd_inode + && i_size_read(s->s_bdev->bd_inode) < + sb_block_count(rs) * sb_blocksize(rs)) { + SWARN(silent, s, "", "Filesystem cannot be " + "mounted because it is bigger than the device"); + SWARN(silent, s, "", "You may need to run fsck " + "or increase size of your LVM partition"); + SWARN(silent, s, "", "Or may be you forgot to " + "reboot after fdisk when it told you to"); + goto error; + } + + sbi->s_mount_state = SB_REISERFS_STATE(s); + sbi->s_mount_state = REISERFS_VALID_FS; + + if ((errval = reiserfs_init_bitmap_cache(s))) { + SWARN(silent, s, "jmacd-8", "unable to read bitmap"); + goto error; + } + errval = -EINVAL; +#ifdef CONFIG_REISERFS_CHECK + SWARN(silent, s, "", "CONFIG_REISERFS_CHECK is set ON"); + SWARN(silent, s, "", "- it is slow mode for debugging."); +#endif + + /* make data=ordered the default */ + if (!reiserfs_data_log(s) && !reiserfs_data_ordered(s) && + !reiserfs_data_writeback(s)) { + REISERFS_SB(s)->s_mount_opt |= (1 << REISERFS_DATA_ORDERED); + } + + if (reiserfs_data_log(s)) { + reiserfs_info(s, "using journaled data mode\n"); + } else if (reiserfs_data_ordered(s)) { + reiserfs_info(s, "using ordered data mode\n"); + } else { + reiserfs_info(s, "using writeback data mode\n"); + } + if (reiserfs_barrier_flush(s)) { + printk("reiserfs: using flush barriers\n"); + } + // set_device_ro(s->s_dev, 1) ; + if (journal_init(s, jdev_name, old_format, commit_max_age)) { + SWARN(silent, s, "sh-2022", + "unable to initialize journal space"); + goto error; + } else { + jinit_done = 1; /* once this is set, journal_release must be called + ** if we error out of the mount + */ + } + if (reread_meta_blocks(s)) { + SWARN(silent, s, "jmacd-9", + "unable to reread meta blocks after journal init"); + goto error; + } + + if (replay_only(s)) + goto error; + + if (bdev_read_only(s->s_bdev) && !(s->s_flags & MS_RDONLY)) { + SWARN(silent, s, "clm-7000", + "Detected readonly device, marking FS readonly"); + s->s_flags |= MS_RDONLY; + } + args.objectid = REISERFS_ROOT_OBJECTID; + args.dirid = REISERFS_ROOT_PARENT_OBJECTID; + root_inode = + iget5_locked(s, REISERFS_ROOT_OBJECTID, reiserfs_find_actor, + reiserfs_init_locked_inode, (void *)(&args)); + if (!root_inode) { + SWARN(silent, s, "jmacd-10", "get root inode failed"); + goto error; + } + + if (root_inode->i_state & I_NEW) { + reiserfs_read_locked_inode(root_inode, &args); + unlock_new_inode(root_inode); + } + + s->s_root = d_alloc_root(root_inode); + if (!s->s_root) { + iput(root_inode); + goto error; + } + // define and initialize hash function + sbi->s_hash_function = hash_function(s); + if (sbi->s_hash_function == NULL) { + dput(s->s_root); + s->s_root = NULL; + goto error; + } + + if (is_reiserfs_3_5(rs) + || (is_reiserfs_jr(rs) && SB_VERSION(s) == REISERFS_VERSION_1)) + set_bit(REISERFS_3_5, &(sbi->s_properties)); + else if (old_format) + set_bit(REISERFS_OLD_FORMAT, &(sbi->s_properties)); + else + set_bit(REISERFS_3_6, &(sbi->s_properties)); + + if (!(s->s_flags & MS_RDONLY)) { + + errval = journal_begin(&th, s, 1); + if (errval) { + dput(s->s_root); + s->s_root = NULL; + goto error; + } + reiserfs_prepare_for_journal(s, SB_BUFFER_WITH_SB(s), 1); + + set_sb_umount_state(rs, REISERFS_ERROR_FS); + set_sb_fs_state(rs, 0); + + /* Clear out s_bmap_nr if it would wrap. We can handle this + * case, but older revisions can't. This will cause the + * file system to fail mount on those older implementations, + * avoiding corruption. 
-jeffm */
+		if (bmap_would_wrap(reiserfs_bmap_count(s)) &&
+		    sb_bmap_nr(rs) != 0) {
+			reiserfs_warning(s, "super-2030", "This file system "
+					"claims to use %u bitmap blocks in "
+					"its super block, but requires %u. "
+					"Clearing to zero.", sb_bmap_nr(rs),
+					reiserfs_bmap_count(s));
+
+			set_sb_bmap_nr(rs, 0);
+		}
+
+		if (old_format_only(s)) {
+			/* filesystem of format 3.5 either with standard or non-standard
+			   journal */
+			if (convert_reiserfs(s)) {
+				/* and -o conv is given */
+				if (!silent)
+					reiserfs_info(s,
+						      "converting 3.5 filesystem to the 3.6 format");
+
+				if (is_reiserfs_3_5(rs))
+					/* put magic string of 3.6 format. 2.2 will not be able to
+					   mount this filesystem anymore */
+					memcpy(rs->s_v1.s_magic,
+					       reiserfs_3_6_magic_string,
+					       sizeof
+					       (reiserfs_3_6_magic_string));
+
+				set_sb_version(rs, REISERFS_VERSION_2);
+				reiserfs_convert_objectid_map_v1(s);
+				set_bit(REISERFS_3_6, &(sbi->s_properties));
+				clear_bit(REISERFS_3_5, &(sbi->s_properties));
+			} else if (!silent) {
+				reiserfs_info(s, "using 3.5.x disk format\n");
+			}
+		} else
+			set_sb_mnt_count(rs, sb_mnt_count(rs) + 1);
+
+
+		journal_mark_dirty(&th, s, SB_BUFFER_WITH_SB(s));
+		errval = journal_end(&th, s, 1);
+		if (errval) {
+			dput(s->s_root);
+			s->s_root = NULL;
+			goto error;
+		}
+
+		if ((errval = reiserfs_lookup_privroot(s)) ||
+		    (errval = reiserfs_xattr_init(s, s->s_flags))) {
+			dput(s->s_root);
+			s->s_root = NULL;
+			goto error;
+		}
+
+		/* look for files which were to be removed in previous session */
+		finish_unfinished(s);
+	} else {
+		if (old_format_only(s) && !silent) {
+			reiserfs_info(s, "using 3.5.x disk format\n");
+		}
+
+		if ((errval = reiserfs_lookup_privroot(s)) ||
+		    (errval = reiserfs_xattr_init(s, s->s_flags))) {
+			dput(s->s_root);
+			s->s_root = NULL;
+			goto error;
+		}
+	}
+	// mark hash in super block: it could be unset. overwrite should be ok
+	set_sb_hash_function_code(rs, function2code(sbi->s_hash_function));
+
+	handle_attrs(s);
+
+	reiserfs_proc_info_init(s);
+
+	init_waitqueue_head(&(sbi->s_wait));
+	spin_lock_init(&sbi->bitmap_lock);
+
+	return (0);
+
+error:
+	if (jinit_done) {	/* kill the commit thread, free journal ram */
+		journal_release_error(NULL, s);
+	}
+
+	reiserfs_free_bitmap_cache(s);
+	if (SB_BUFFER_WITH_SB(s))
+		brelse(SB_BUFFER_WITH_SB(s));
+#ifdef CONFIG_QUOTA
+	{
+		int j;
+		for (j = 0; j < MAXQUOTAS; j++)
+			kfree(qf_names[j]);
+	}
+#endif
+	kfree(sbi);
+
+	s->s_fs_info = NULL;
+	return errval;
+}
+
+static int reiserfs_statfs(struct dentry *dentry, struct kstatfs *buf)
+{
+	struct reiserfs_super_block *rs = SB_DISK_SUPER_BLOCK(dentry->d_sb);
+
+	buf->f_namelen = (REISERFS_MAX_NAME(dentry->d_sb->s_blocksize));
+	buf->f_bfree = sb_free_blocks(rs);
+	buf->f_bavail = buf->f_bfree;
+	buf->f_blocks = sb_block_count(rs) - sb_bmap_nr(rs) - 1;
+	buf->f_bsize = dentry->d_sb->s_blocksize;
+	/* changed to accommodate gcc folks.
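+	 * (f_fsid below is derived by folding the two halves of the on-disk
+	 * uuid through crc32_le, giving userspace a stable 64-bit id.)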
*/ + buf->f_type = REISERFS_SUPER_MAGIC; + buf->f_fsid.val[0] = (u32)crc32_le(0, rs->s_uuid, sizeof(rs->s_uuid)/2); + buf->f_fsid.val[1] = (u32)crc32_le(0, rs->s_uuid + sizeof(rs->s_uuid)/2, + sizeof(rs->s_uuid)/2); + + return 0; +} + +#ifdef CONFIG_QUOTA +static int reiserfs_write_dquot(struct dquot *dquot) +{ + struct reiserfs_transaction_handle th; + int ret, err; + + reiserfs_write_lock(dquot->dq_sb); + ret = + journal_begin(&th, dquot->dq_sb, + REISERFS_QUOTA_TRANS_BLOCKS(dquot->dq_sb)); + if (ret) + goto out; + ret = dquot_commit(dquot); + err = + journal_end(&th, dquot->dq_sb, + REISERFS_QUOTA_TRANS_BLOCKS(dquot->dq_sb)); + if (!ret && err) + ret = err; + out: + reiserfs_write_unlock(dquot->dq_sb); + return ret; +} + +static int reiserfs_acquire_dquot(struct dquot *dquot) +{ + struct reiserfs_transaction_handle th; + int ret, err; + + reiserfs_write_lock(dquot->dq_sb); + ret = + journal_begin(&th, dquot->dq_sb, + REISERFS_QUOTA_INIT_BLOCKS(dquot->dq_sb)); + if (ret) + goto out; + ret = dquot_acquire(dquot); + err = + journal_end(&th, dquot->dq_sb, + REISERFS_QUOTA_INIT_BLOCKS(dquot->dq_sb)); + if (!ret && err) + ret = err; + out: + reiserfs_write_unlock(dquot->dq_sb); + return ret; +} + +static int reiserfs_release_dquot(struct dquot *dquot) +{ + struct reiserfs_transaction_handle th; + int ret, err; + + reiserfs_write_lock(dquot->dq_sb); + ret = + journal_begin(&th, dquot->dq_sb, + REISERFS_QUOTA_DEL_BLOCKS(dquot->dq_sb)); + if (ret) { + /* Release dquot anyway to avoid endless cycle in dqput() */ + dquot_release(dquot); + goto out; + } + ret = dquot_release(dquot); + err = + journal_end(&th, dquot->dq_sb, + REISERFS_QUOTA_DEL_BLOCKS(dquot->dq_sb)); + if (!ret && err) + ret = err; + out: + reiserfs_write_unlock(dquot->dq_sb); + return ret; +} + +static int reiserfs_mark_dquot_dirty(struct dquot *dquot) +{ + /* Are we journaling quotas? */ + if (REISERFS_SB(dquot->dq_sb)->s_qf_names[USRQUOTA] || + REISERFS_SB(dquot->dq_sb)->s_qf_names[GRPQUOTA]) { + dquot_mark_dquot_dirty(dquot); + return reiserfs_write_dquot(dquot); + } else + return dquot_mark_dquot_dirty(dquot); +} + +static int reiserfs_write_info(struct super_block *sb, int type) +{ + struct reiserfs_transaction_handle th; + int ret, err; + + /* Data block + inode block */ + reiserfs_write_lock(sb); + ret = journal_begin(&th, sb, 2); + if (ret) + goto out; + ret = dquot_commit_info(sb, type); + err = journal_end(&th, sb, 2); + if (!ret && err) + ret = err; + out: + reiserfs_write_unlock(sb); + return ret; +} + +/* + * Turn on quotas during mount time - we need to find the quota file and such... + */ +static int reiserfs_quota_on_mount(struct super_block *sb, int type) +{ + return vfs_quota_on_mount(sb, REISERFS_SB(sb)->s_qf_names[type], + REISERFS_SB(sb)->s_jquota_fmt, type); +} + +/* + * Standard function to be called on quota_on + */ +static int reiserfs_quota_on(struct super_block *sb, int type, int format_id, + char *name, int remount) +{ + int err; + struct path path; + struct inode *inode; + struct reiserfs_transaction_handle th; + + if (!(REISERFS_SB(sb)->s_mount_opt & (1 << REISERFS_QUOTA))) + return -EINVAL; + /* No more checks needed? Path and format_id are bogus anyway... */ + if (remount) + return vfs_quota_on(sb, type, format_id, name, 1); + err = kern_path(name, LOOKUP_FOLLOW, &path); + if (err) + return err; + /* Quotafile not on the same filesystem? 
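+	 * (path.mnt->mnt_sb must be the very superblock we are enabling
+	 * quota on; a quota file on any other mount is rejected with -EXDEV.)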
*/ + if (path.mnt->mnt_sb != sb) { + err = -EXDEV; + goto out; + } + inode = path.dentry->d_inode; + /* We must not pack tails for quota files on reiserfs for quota IO to work */ + if (!(REISERFS_I(inode)->i_flags & i_nopack_mask)) { + err = reiserfs_unpack(inode, NULL); + if (err) { + reiserfs_warning(sb, "super-6520", + "Unpacking tail of quota file failed" + " (%d). Cannot turn on quotas.", err); + err = -EINVAL; + goto out; + } + mark_inode_dirty(inode); + } + /* Journaling quota? */ + if (REISERFS_SB(sb)->s_qf_names[type]) { + /* Quotafile not of fs root? */ + if (path.dentry->d_parent != sb->s_root) + reiserfs_warning(sb, "super-6521", + "Quota file not on filesystem root. " + "Journalled quota will not work."); + } + + /* + * When we journal data on quota file, we have to flush journal to see + * all updates to the file when we bypass pagecache... + */ + if (reiserfs_file_data_log(inode)) { + /* Just start temporary transaction and finish it */ + err = journal_begin(&th, sb, 1); + if (err) + goto out; + err = journal_end_sync(&th, sb, 1); + if (err) + goto out; + } + err = vfs_quota_on_path(sb, type, format_id, &path); +out: + path_put(&path); + return err; +} + +/* Read data from quotafile - avoid pagecache and such because we cannot afford + * acquiring the locks... As quota files are never truncated and quota code + * itself serializes the operations (and noone else should touch the files) + * we don't have to be afraid of races */ +static ssize_t reiserfs_quota_read(struct super_block *sb, int type, char *data, + size_t len, loff_t off) +{ + struct inode *inode = sb_dqopt(sb)->files[type]; + unsigned long blk = off >> sb->s_blocksize_bits; + int err = 0, offset = off & (sb->s_blocksize - 1), tocopy; + size_t toread; + struct buffer_head tmp_bh, *bh; + loff_t i_size = i_size_read(inode); + + if (off > i_size) + return 0; + if (off + len > i_size) + len = i_size - off; + toread = len; + while (toread > 0) { + tocopy = + sb->s_blocksize - offset < + toread ? sb->s_blocksize - offset : toread; + tmp_bh.b_state = 0; + /* Quota files are without tails so we can safely use this function */ + reiserfs_write_lock(sb); + err = reiserfs_get_block(inode, blk, &tmp_bh, 0); + reiserfs_write_unlock(sb); + if (err) + return err; + if (!buffer_mapped(&tmp_bh)) /* A hole? */ + memset(data, 0, tocopy); + else { + bh = sb_bread(sb, tmp_bh.b_blocknr); + if (!bh) + return -EIO; + memcpy(data, bh->b_data + offset, tocopy); + brelse(bh); + } + offset = 0; + toread -= tocopy; + data += tocopy; + blk++; + } + return len; +} + +/* Write to quotafile (we know the transaction is already started and has + * enough credits) */ +static ssize_t reiserfs_quota_write(struct super_block *sb, int type, + const char *data, size_t len, loff_t off) +{ + struct inode *inode = sb_dqopt(sb)->files[type]; + unsigned long blk = off >> sb->s_blocksize_bits; + int err = 0, offset = off & (sb->s_blocksize - 1), tocopy; + int journal_quota = REISERFS_SB(sb)->s_qf_names[type] != NULL; + size_t towrite = len; + struct buffer_head tmp_bh, *bh; + + if (!current->journal_info) { + printk(KERN_WARNING "reiserfs: Quota write (off=%Lu, len=%Lu)" + " cancelled because transaction is not started.\n", + (unsigned long long)off, (unsigned long long)len); + return -EIO; + } + mutex_lock_nested(&inode->i_mutex, I_MUTEX_QUOTA); + while (towrite > 0) { + tocopy = sb->s_blocksize - offset < towrite ? 
+ sb->s_blocksize - offset : towrite; + tmp_bh.b_state = 0; + err = reiserfs_get_block(inode, blk, &tmp_bh, GET_BLOCK_CREATE); + if (err) + goto out; + if (offset || tocopy != sb->s_blocksize) + bh = sb_bread(sb, tmp_bh.b_blocknr); + else + bh = sb_getblk(sb, tmp_bh.b_blocknr); + if (!bh) { + err = -EIO; + goto out; + } + lock_buffer(bh); + memcpy(bh->b_data + offset, data, tocopy); + flush_dcache_page(bh->b_page); + set_buffer_uptodate(bh); + unlock_buffer(bh); + reiserfs_prepare_for_journal(sb, bh, 1); + journal_mark_dirty(current->journal_info, sb, bh); + if (!journal_quota) + reiserfs_add_ordered_list(inode, bh); + brelse(bh); + offset = 0; + towrite -= tocopy; + data += tocopy; + blk++; + } +out: + if (len == towrite) { + mutex_unlock(&inode->i_mutex); + return err; + } + if (inode->i_size < off + len - towrite) + i_size_write(inode, off + len - towrite); + inode->i_version++; + inode->i_mtime = inode->i_ctime = CURRENT_TIME; + mark_inode_dirty(inode); + mutex_unlock(&inode->i_mutex); + return len - towrite; +} + +#endif + +static int get_super_block(struct file_system_type *fs_type, + int flags, const char *dev_name, + void *data, struct vfsmount *mnt) +{ + return get_sb_bdev(fs_type, flags, dev_name, data, reiserfs_fill_super, + mnt); +} + +static int __init init_reiserfs_fs(void) +{ + int ret; + + if ((ret = init_inodecache())) { + return ret; + } + + reiserfs_proc_info_global_init(); + reiserfs_proc_register_global("version", + reiserfs_global_version_in_proc); + + ret = register_filesystem(&reiserfs_fs_type); + + if (ret == 0) { + return 0; + } + + reiserfs_proc_unregister_global("version"); + reiserfs_proc_info_global_done(); + destroy_inodecache(); + + return ret; +} + +static void __exit exit_reiserfs_fs(void) +{ + reiserfs_proc_unregister_global("version"); + reiserfs_proc_info_global_done(); + unregister_filesystem(&reiserfs_fs_type); + destroy_inodecache(); +} + +struct file_system_type reiserfs_fs_type = { + .owner = THIS_MODULE, + .name = "reiserfs", + .get_sb = get_super_block, + .kill_sb = reiserfs_kill_sb, + .fs_flags = FS_REQUIRES_DEV, +}; + +MODULE_DESCRIPTION("ReiserFS journaled filesystem"); +MODULE_AUTHOR("Hans Reiser "); +MODULE_LICENSE("GPL"); + +module_init(init_reiserfs_fs); +module_exit(exit_reiserfs_fs); diff -Nur kernel-2.6.32.54/linux-2.6.32/fs/reiserfs/xattr.c kernel-2.6.32.54.vs/linux-2.6.32/fs/reiserfs/xattr.c --- kernel-2.6.32.54/linux-2.6.32/fs/reiserfs/xattr.c 2012-01-16 15:01:39.640726397 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/fs/reiserfs/xattr.c 2012-01-16 14:51:21.897408771 +0100 @@ -39,6 +39,7 @@ #include #include #include +#include #include #include #include diff -Nur kernel-2.6.32.54/linux-2.6.32/fs/stat.c kernel-2.6.32.54.vs/linux-2.6.32/fs/stat.c --- kernel-2.6.32.54/linux-2.6.32/fs/stat.c 2012-01-16 15:01:39.652726354 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/fs/stat.c 2012-01-16 14:51:21.897408771 +0100 @@ -26,6 +26,7 @@ stat->nlink = inode->i_nlink; stat->uid = inode->i_uid; stat->gid = inode->i_gid; + stat->tag = inode->i_tag; stat->rdev = inode->i_rdev; stat->atime = inode->i_atime; stat->mtime = inode->i_mtime; diff -Nur kernel-2.6.32.54/linux-2.6.32/fs/super.c kernel-2.6.32.54.vs/linux-2.6.32/fs/super.c --- kernel-2.6.32.54/linux-2.6.32/fs/super.c 2012-01-16 15:01:40.272724159 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/fs/super.c 2012-01-16 14:51:21.897408771 +0100 @@ -37,6 +37,9 @@ #include #include #include +#include +#include +#include #include #include "internal.h" @@ -916,12 +919,18 @@ vfs_kern_mount(struct file_system_type 
*type, int flags, const char *name, void *data) { struct vfsmount *mnt; + struct super_block *sb; char *secdata = NULL; int error; if (!type) return ERR_PTR(-ENODEV); + error = -EPERM; + if ((type->fs_flags & FS_BINARY_MOUNTDATA) && + !vx_capable(CAP_SYS_ADMIN, VXC_BINARY_MOUNT)) + goto out; + error = -ENOMEM; mnt = alloc_vfsmnt(name); if (!mnt) @@ -940,9 +949,17 @@ error = type->get_sb(type, flags, name, data, mnt); if (error < 0) goto out_free_secdata; - BUG_ON(!mnt->mnt_sb); - error = security_sb_kern_mount(mnt->mnt_sb, flags, secdata); + sb = mnt->mnt_sb; + BUG_ON(!sb); + + error = -EPERM; + if (!vx_capable(CAP_SYS_ADMIN, VXC_BINARY_MOUNT) && !sb->s_bdev && + (sb->s_magic != PROC_SUPER_MAGIC) && + (sb->s_magic != DEVPTS_SUPER_MAGIC)) + goto out_sb; + + error = security_sb_kern_mount(sb, flags, secdata); if (error) goto out_sb; diff -Nur kernel-2.6.32.54/linux-2.6.32/fs/super.c.orig kernel-2.6.32.54.vs/linux-2.6.32/fs/super.c.orig --- kernel-2.6.32.54/linux-2.6.32/fs/super.c.orig 1970-01-01 01:00:00.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/fs/super.c.orig 2012-01-16 14:47:19.470254871 +0100 @@ -0,0 +1,1021 @@ +/* + * linux/fs/super.c + * + * Copyright (C) 1991, 1992 Linus Torvalds + * + * super.c contains code to handle: - mount structures + * - super-block tables + * - filesystem drivers list + * - mount system call + * - umount system call + * - ustat system call + * + * GK 2/5/95 - Changed to support mounting the root fs via NFS + * + * Added kerneld support: Jacques Gelinas and Bjorn Ekwall + * Added change_root: Werner Almesberger & Hans Lermen, Feb '96 + * Added options to /proc/mounts: + * Torbjörn Lindh (torbjorn.lindh@gopta.se), April 14, 1996. + * Added devfs support: Richard Gooch , 13-JAN-1998 + * Heavily rewritten for 'one fs - one tree' dcache architecture. AV, Mar 2000 + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include /* for the emergency remount stuff */ +#include +#include +#include +#include +#include +#include "internal.h" + + +LIST_HEAD(super_blocks); +EXPORT_SYMBOL_GPL(super_blocks); + +DEFINE_SPINLOCK(sb_lock); + +/** + * alloc_super - create new superblock + * @type: filesystem type superblock should belong to + * + * Allocates and initializes a new &struct super_block. alloc_super() + * returns a pointer new superblock or %NULL if allocation had failed. + */ +static struct super_block *alloc_super(struct file_system_type *type) +{ + struct super_block *s = kzalloc(sizeof(struct super_block), GFP_USER); + static const struct super_operations default_op; + + if (s) { + if (security_sb_alloc(s)) { + kfree(s); + s = NULL; + goto out; + } + INIT_LIST_HEAD(&s->s_files); + INIT_LIST_HEAD(&s->s_instances); + INIT_HLIST_HEAD(&s->s_anon); + INIT_LIST_HEAD(&s->s_inodes); + INIT_LIST_HEAD(&s->s_dentry_lru); + init_rwsem(&s->s_umount); + mutex_init(&s->s_lock); + lockdep_set_class(&s->s_umount, &type->s_umount_key); + /* + * The locking rules for s_lock are up to the + * filesystem. For example ext3fs has different + * lock ordering than usbfs: + */ + lockdep_set_class(&s->s_lock, &type->s_lock_key); + /* + * sget() can have s_umount recursion. + * + * When it cannot find a suitable sb, it allocates a new + * one (this one), and tries again to find a suitable old + * one. + * + * In case that succeeds, it will acquire the s_umount + * lock of the old one. Since these are clearly distrinct + * locks, and this object isn't exposed yet, there's no + * risk of deadlocks. 
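+		 * (Concretely: the old sb's s_umount and this not-yet-published
+		 * one are distinct lock instances, so the nested
+		 * down_write_nested(SINGLE_DEPTH_NESTING) below cannot deadlock.)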
+ * + * Annotate this by putting this lock in a different + * subclass. + */ + down_write_nested(&s->s_umount, SINGLE_DEPTH_NESTING); + s->s_count = S_BIAS; + atomic_set(&s->s_active, 1); + mutex_init(&s->s_vfs_rename_mutex); + mutex_init(&s->s_dquot.dqio_mutex); + mutex_init(&s->s_dquot.dqonoff_mutex); + init_rwsem(&s->s_dquot.dqptr_sem); + init_waitqueue_head(&s->s_wait_unfrozen); + s->s_maxbytes = MAX_NON_LFS; + s->dq_op = sb_dquot_ops; + s->s_qcop = sb_quotactl_ops; + s->s_op = &default_op; + s->s_time_gran = 1000000000; + } +out: + return s; +} + +/** + * destroy_super - frees a superblock + * @s: superblock to free + * + * Frees a superblock. + */ +static inline void destroy_super(struct super_block *s) +{ + security_sb_free(s); + kfree(s->s_subtype); + kfree(s->s_options); + kfree(s); +} + +/* Superblock refcounting */ + +/* + * Drop a superblock's refcount. Returns non-zero if the superblock was + * destroyed. The caller must hold sb_lock. + */ +static int __put_super(struct super_block *sb) +{ + int ret = 0; + + if (!--sb->s_count) { + destroy_super(sb); + ret = 1; + } + return ret; +} + +/* + * Drop a superblock's refcount. + * Returns non-zero if the superblock is about to be destroyed and + * at least is already removed from super_blocks list, so if we are + * making a loop through super blocks then we need to restart. + * The caller must hold sb_lock. + */ +int __put_super_and_need_restart(struct super_block *sb) +{ + /* check for race with generic_shutdown_super() */ + if (list_empty(&sb->s_list)) { + /* super block is removed, need to restart... */ + __put_super(sb); + return 1; + } + /* can't be the last, since s_list is still in use */ + sb->s_count--; + BUG_ON(sb->s_count == 0); + return 0; +} + +/** + * put_super - drop a temporary reference to superblock + * @sb: superblock in question + * + * Drops a temporary reference, frees superblock if there's no + * references left. + */ +void put_super(struct super_block *sb) +{ + spin_lock(&sb_lock); + __put_super(sb); + spin_unlock(&sb_lock); +} + + +/** + * deactivate_super - drop an active reference to superblock + * @s: superblock to deactivate + * + * Drops an active reference to superblock, acquiring a temprory one if + * there is no active references left. In that case we lock superblock, + * tell fs driver to shut it down and drop the temporary reference we + * had just acquired. + */ +void deactivate_super(struct super_block *s) +{ + struct file_system_type *fs = s->s_type; + if (atomic_dec_and_lock(&s->s_active, &sb_lock)) { + s->s_count -= S_BIAS-1; + spin_unlock(&sb_lock); + vfs_dq_off(s, 0); + down_write(&s->s_umount); + fs->kill_sb(s); + put_filesystem(fs); + put_super(s); + } +} + +EXPORT_SYMBOL(deactivate_super); + +/** + * deactivate_locked_super - drop an active reference to superblock + * @s: superblock to deactivate + * + * Equivalent of up_write(&s->s_umount); deactivate_super(s);, except that + * it does not unlock it until it's all over. As the result, it's safe to + * use to dispose of new superblock on ->get_sb() failure exits - nobody + * will see the sucker until it's all over. Equivalent using up_write + + * deactivate_super is safe for that purpose only if superblock is either + * safe to use or has NULL ->s_root when we unlock. 
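+ * (Contrast deactivate_super() above, which takes s_umount itself after
+ * dropping the last active reference.)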
+ */ +void deactivate_locked_super(struct super_block *s) +{ + struct file_system_type *fs = s->s_type; + if (atomic_dec_and_lock(&s->s_active, &sb_lock)) { + s->s_count -= S_BIAS-1; + spin_unlock(&sb_lock); + vfs_dq_off(s, 0); + fs->kill_sb(s); + put_filesystem(fs); + put_super(s); + } else { + up_write(&s->s_umount); + } +} + +EXPORT_SYMBOL(deactivate_locked_super); + +/** + * grab_super - acquire an active reference + * @s: reference we are trying to make active + * + * Tries to acquire an active reference. grab_super() is used when we + * had just found a superblock in super_blocks or fs_type->fs_supers + * and want to turn it into a full-blown active reference. grab_super() + * is called with sb_lock held and drops it. Returns 1 in case of + * success, 0 if we had failed (superblock contents was already dead or + * dying when grab_super() had been called). + */ +static int grab_super(struct super_block *s) __releases(sb_lock) +{ + s->s_count++; + spin_unlock(&sb_lock); + down_write(&s->s_umount); + if (s->s_root) { + spin_lock(&sb_lock); + if (s->s_count > S_BIAS) { + atomic_inc(&s->s_active); + s->s_count--; + spin_unlock(&sb_lock); + return 1; + } + spin_unlock(&sb_lock); + } + up_write(&s->s_umount); + put_super(s); + yield(); + return 0; +} + +/* + * Superblock locking. We really ought to get rid of these two. + */ +void lock_super(struct super_block * sb) +{ + get_fs_excl(); + mutex_lock(&sb->s_lock); +} + +void unlock_super(struct super_block * sb) +{ + put_fs_excl(); + mutex_unlock(&sb->s_lock); +} + +EXPORT_SYMBOL(lock_super); +EXPORT_SYMBOL(unlock_super); + +/** + * generic_shutdown_super - common helper for ->kill_sb() + * @sb: superblock to kill + * + * generic_shutdown_super() does all fs-independent work on superblock + * shutdown. Typical ->kill_sb() should pick all fs-specific objects + * that need destruction out of superblock, call generic_shutdown_super() + * and release aforementioned objects. Note: dentries and inodes _are_ + * taken care of and do not need specific handling. + * + * Upon calling this function, the filesystem may no longer alter or + * rearrange the set of dentries belonging to this super_block, nor may it + * change the attachments of dentries to inodes. + */ +void generic_shutdown_super(struct super_block *sb) +{ + const struct super_operations *sop = sb->s_op; + + + if (sb->s_root) { + shrink_dcache_for_umount(sb); + sync_filesystem(sb); + get_fs_excl(); + sb->s_flags &= ~MS_ACTIVE; + + /* bad name - it should be evict_inodes() */ + invalidate_inodes(sb); + + if (sop->put_super) + sop->put_super(sb); + + /* Forget any remaining inodes */ + if (invalidate_inodes(sb)) { + printk("VFS: Busy inodes after unmount of %s. " + "Self-destruct in 5 seconds. 
Have a nice day...\n", + sb->s_id); + } + put_fs_excl(); + } + spin_lock(&sb_lock); + /* should be initialized for __put_super_and_need_restart() */ + list_del_init(&sb->s_list); + list_del(&sb->s_instances); + spin_unlock(&sb_lock); + up_write(&sb->s_umount); +} + +EXPORT_SYMBOL(generic_shutdown_super); + +/** + * sget - find or create a superblock + * @type: filesystem type superblock should belong to + * @test: comparison callback + * @set: setup callback + * @data: argument to each of them + */ +struct super_block *sget(struct file_system_type *type, + int (*test)(struct super_block *,void *), + int (*set)(struct super_block *,void *), + void *data) +{ + struct super_block *s = NULL; + struct super_block *old; + int err; + +retry: + spin_lock(&sb_lock); + if (test) { + list_for_each_entry(old, &type->fs_supers, s_instances) { + if (!test(old, data)) + continue; + if (!grab_super(old)) + goto retry; + if (s) { + up_write(&s->s_umount); + destroy_super(s); + } + return old; + } + } + if (!s) { + spin_unlock(&sb_lock); + s = alloc_super(type); + if (!s) + return ERR_PTR(-ENOMEM); + goto retry; + } + + err = set(s, data); + if (err) { + spin_unlock(&sb_lock); + up_write(&s->s_umount); + destroy_super(s); + return ERR_PTR(err); + } + s->s_type = type; + strlcpy(s->s_id, type->name, sizeof(s->s_id)); + list_add_tail(&s->s_list, &super_blocks); + list_add(&s->s_instances, &type->fs_supers); + spin_unlock(&sb_lock); + get_filesystem(type); + return s; +} + +EXPORT_SYMBOL(sget); + +void drop_super(struct super_block *sb) +{ + up_read(&sb->s_umount); + put_super(sb); +} + +EXPORT_SYMBOL(drop_super); + +/** + * sync_supers - helper for periodic superblock writeback + * + * Call the write_super method if present on all dirty superblocks in + * the system. This is for the periodic writeback used by most older + * filesystems. For data integrity superblock writeback use + * sync_filesystems() instead. + * + * Note: check the dirty flag before waiting, so we don't + * hold up the sync while mounting a device. (The newly + * mounted device won't need syncing.) + */ +void sync_supers(void) +{ + struct super_block *sb; + + spin_lock(&sb_lock); +restart: + list_for_each_entry(sb, &super_blocks, s_list) { + if (sb->s_op->write_super && sb->s_dirt) { + sb->s_count++; + spin_unlock(&sb_lock); + + down_read(&sb->s_umount); + if (sb->s_root && sb->s_dirt) + sb->s_op->write_super(sb); + up_read(&sb->s_umount); + + spin_lock(&sb_lock); + if (__put_super_and_need_restart(sb)) + goto restart; + } + } + spin_unlock(&sb_lock); +} + +/** + * get_super - get the superblock of a device + * @bdev: device to get the superblock for + * + * Scans the superblock list and finds the superblock of the file system + * mounted on the device given. %NULL is returned if no match is found. 
+ */ + +struct super_block * get_super(struct block_device *bdev) +{ + struct super_block *sb; + + if (!bdev) + return NULL; + + spin_lock(&sb_lock); +rescan: + list_for_each_entry(sb, &super_blocks, s_list) { + if (sb->s_bdev == bdev) { + sb->s_count++; + spin_unlock(&sb_lock); + down_read(&sb->s_umount); + if (sb->s_root) + return sb; + up_read(&sb->s_umount); + /* restart only when sb is no longer on the list */ + spin_lock(&sb_lock); + if (__put_super_and_need_restart(sb)) + goto rescan; + } + } + spin_unlock(&sb_lock); + return NULL; +} + +EXPORT_SYMBOL(get_super); + +/** + * get_active_super - get an active reference to the superblock of a device + * @bdev: device to get the superblock for + * + * Scans the superblock list and finds the superblock of the file system + * mounted on the device given. Returns the superblock with an active + * reference and s_umount held exclusively or %NULL if none was found. + */ +struct super_block *get_active_super(struct block_device *bdev) +{ + struct super_block *sb; + + if (!bdev) + return NULL; + + spin_lock(&sb_lock); + list_for_each_entry(sb, &super_blocks, s_list) { + if (sb->s_bdev != bdev) + continue; + + sb->s_count++; + spin_unlock(&sb_lock); + down_write(&sb->s_umount); + if (sb->s_root) { + spin_lock(&sb_lock); + if (sb->s_count > S_BIAS) { + atomic_inc(&sb->s_active); + sb->s_count--; + spin_unlock(&sb_lock); + return sb; + } + spin_unlock(&sb_lock); + } + up_write(&sb->s_umount); + put_super(sb); + yield(); + spin_lock(&sb_lock); + } + spin_unlock(&sb_lock); + return NULL; +} + +struct super_block * user_get_super(dev_t dev) +{ + struct super_block *sb; + + spin_lock(&sb_lock); +rescan: + list_for_each_entry(sb, &super_blocks, s_list) { + if (sb->s_dev == dev) { + sb->s_count++; + spin_unlock(&sb_lock); + down_read(&sb->s_umount); + if (sb->s_root) + return sb; + up_read(&sb->s_umount); + /* restart only when sb is no longer on the list */ + spin_lock(&sb_lock); + if (__put_super_and_need_restart(sb)) + goto rescan; + } + } + spin_unlock(&sb_lock); + return NULL; +} + +SYSCALL_DEFINE2(ustat, unsigned, dev, struct ustat __user *, ubuf) +{ + struct super_block *s; + struct ustat tmp; + struct kstatfs sbuf; + int err = -EINVAL; + + s = user_get_super(new_decode_dev(dev)); + if (s == NULL) + goto out; + err = vfs_statfs(s->s_root, &sbuf); + drop_super(s); + if (err) + goto out; + + memset(&tmp,0,sizeof(struct ustat)); + tmp.f_tfree = sbuf.f_bfree; + tmp.f_tinode = sbuf.f_ffree; + + err = copy_to_user(ubuf,&tmp,sizeof(struct ustat)) ? -EFAULT : 0; +out: + return err; +} + +/** + * do_remount_sb - asks filesystem to change mount options. + * @sb: superblock in question + * @flags: numeric part of options + * @data: the rest of options + * @force: whether or not to force the change + * + * Alters the mount options of a mounted file system. 
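+ * (For example, an ro -> rw transition clears MS_RDONLY in @flags; the
+ * body below records that in remount_rw and re-enables quotas via
+ * vfs_dq_quota_on_remount() once ->remount_fs() has succeeded.)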
+ */ +int do_remount_sb(struct super_block *sb, int flags, void *data, int force) +{ + int retval; + int remount_rw; + + if (sb->s_frozen != SB_UNFROZEN) + return -EBUSY; + +#ifdef CONFIG_BLOCK + if (!(flags & MS_RDONLY) && bdev_read_only(sb->s_bdev)) + return -EACCES; +#endif + + if (flags & MS_RDONLY) + acct_auto_close(sb); + shrink_dcache_sb(sb); + sync_filesystem(sb); + + /* If we are remounting RDONLY and current sb is read/write, + make sure there are no rw files opened */ + if ((flags & MS_RDONLY) && !(sb->s_flags & MS_RDONLY)) { + if (force) + mark_files_ro(sb); + else if (!fs_may_remount_ro(sb)) + return -EBUSY; + retval = vfs_dq_off(sb, 1); + if (retval < 0 && retval != -ENOSYS) + return -EBUSY; + } + remount_rw = !(flags & MS_RDONLY) && (sb->s_flags & MS_RDONLY); + + if (sb->s_op->remount_fs) { + retval = sb->s_op->remount_fs(sb, &flags, data); + if (retval) + return retval; + } + sb->s_flags = (sb->s_flags & ~MS_RMT_MASK) | (flags & MS_RMT_MASK); + if (remount_rw) + vfs_dq_quota_on_remount(sb); + return 0; +} + +static void do_emergency_remount(struct work_struct *work) +{ + struct super_block *sb; + + spin_lock(&sb_lock); + list_for_each_entry(sb, &super_blocks, s_list) { + sb->s_count++; + spin_unlock(&sb_lock); + down_write(&sb->s_umount); + if (sb->s_root && sb->s_bdev && !(sb->s_flags & MS_RDONLY)) { + /* + * ->remount_fs needs lock_kernel(). + * + * What lock protects sb->s_flags?? + */ + do_remount_sb(sb, MS_RDONLY, NULL, 1); + } + up_write(&sb->s_umount); + put_super(sb); + spin_lock(&sb_lock); + } + spin_unlock(&sb_lock); + kfree(work); + printk("Emergency Remount complete\n"); +} + +void emergency_remount(void) +{ + struct work_struct *work; + + work = kmalloc(sizeof(*work), GFP_ATOMIC); + if (work) { + INIT_WORK(work, do_emergency_remount); + schedule_work(work); + } +} + +/* + * Unnamed block devices are dummy devices used by virtual + * filesystems which don't use real block-devices. -- jrs + */ + +static DEFINE_IDA(unnamed_dev_ida); +static DEFINE_SPINLOCK(unnamed_dev_lock);/* protects the above */ +static int unnamed_dev_start = 0; /* don't bother trying below it */ + +int set_anon_super(struct super_block *s, void *data) +{ + int dev; + int error; + + retry: + if (ida_pre_get(&unnamed_dev_ida, GFP_ATOMIC) == 0) + return -ENOMEM; + spin_lock(&unnamed_dev_lock); + error = ida_get_new_above(&unnamed_dev_ida, unnamed_dev_start, &dev); + if (!error) + unnamed_dev_start = dev + 1; + spin_unlock(&unnamed_dev_lock); + if (error == -EAGAIN) + /* We raced and lost with another CPU. 
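+		 * ida_get_new_above() can fail with -EAGAIN even after a
+		 * successful ida_pre_get() when another CPU consumed the
+		 * preallocated node first, hence the retry loop.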
*/ + goto retry; + else if (error) + return -EAGAIN; + + if ((dev & MAX_ID_MASK) == (1 << MINORBITS)) { + spin_lock(&unnamed_dev_lock); + ida_remove(&unnamed_dev_ida, dev); + if (unnamed_dev_start > dev) + unnamed_dev_start = dev; + spin_unlock(&unnamed_dev_lock); + return -EMFILE; + } + s->s_dev = MKDEV(0, dev & MINORMASK); + return 0; +} + +EXPORT_SYMBOL(set_anon_super); + +void kill_anon_super(struct super_block *sb) +{ + int slot = MINOR(sb->s_dev); + + generic_shutdown_super(sb); + spin_lock(&unnamed_dev_lock); + ida_remove(&unnamed_dev_ida, slot); + if (slot < unnamed_dev_start) + unnamed_dev_start = slot; + spin_unlock(&unnamed_dev_lock); +} + +EXPORT_SYMBOL(kill_anon_super); + +void kill_litter_super(struct super_block *sb) +{ + if (sb->s_root) + d_genocide(sb->s_root); + kill_anon_super(sb); +} + +EXPORT_SYMBOL(kill_litter_super); + +static int ns_test_super(struct super_block *sb, void *data) +{ + return sb->s_fs_info == data; +} + +static int ns_set_super(struct super_block *sb, void *data) +{ + sb->s_fs_info = data; + return set_anon_super(sb, NULL); +} + +int get_sb_ns(struct file_system_type *fs_type, int flags, void *data, + int (*fill_super)(struct super_block *, void *, int), + struct vfsmount *mnt) +{ + struct super_block *sb; + + sb = sget(fs_type, ns_test_super, ns_set_super, data); + if (IS_ERR(sb)) + return PTR_ERR(sb); + + if (!sb->s_root) { + int err; + sb->s_flags = flags; + err = fill_super(sb, data, flags & MS_SILENT ? 1 : 0); + if (err) { + deactivate_locked_super(sb); + return err; + } + + sb->s_flags |= MS_ACTIVE; + } + + simple_set_mnt(mnt, sb); + return 0; +} + +EXPORT_SYMBOL(get_sb_ns); + +#ifdef CONFIG_BLOCK +static int set_bdev_super(struct super_block *s, void *data) +{ + s->s_bdev = data; + s->s_dev = s->s_bdev->bd_dev; + + /* + * We set the bdi here to the queue backing, file systems can + * overwrite this in ->fill_super() + */ + s->s_bdi = &bdev_get_queue(s->s_bdev)->backing_dev_info; + return 0; +} + +static int test_bdev_super(struct super_block *s, void *data) +{ + return (void *)s->s_bdev == data; +} + +int get_sb_bdev(struct file_system_type *fs_type, + int flags, const char *dev_name, void *data, + int (*fill_super)(struct super_block *, void *, int), + struct vfsmount *mnt) +{ + struct block_device *bdev; + struct super_block *s; + fmode_t mode = FMODE_READ; + int error = 0; + + if (!(flags & MS_RDONLY)) + mode |= FMODE_WRITE; + + bdev = open_bdev_exclusive(dev_name, mode, fs_type); + if (IS_ERR(bdev)) + return PTR_ERR(bdev); + + /* + * once the super is inserted into the list by sget, s_umount + * will protect the lockfs code from trying to start a snapshot + * while we are mounting + */ + mutex_lock(&bdev->bd_fsfreeze_mutex); + if (bdev->bd_fsfreeze_count > 0) { + mutex_unlock(&bdev->bd_fsfreeze_mutex); + error = -EBUSY; + goto error_bdev; + } + s = sget(fs_type, test_bdev_super, set_bdev_super, bdev); + mutex_unlock(&bdev->bd_fsfreeze_mutex); + if (IS_ERR(s)) + goto error_s; + + if (s->s_root) { + if ((flags ^ s->s_flags) & MS_RDONLY) { + deactivate_locked_super(s); + error = -EBUSY; + goto error_bdev; + } + + close_bdev_exclusive(bdev, mode); + } else { + char b[BDEVNAME_SIZE]; + + s->s_flags = flags; + s->s_mode = mode; + strlcpy(s->s_id, bdevname(bdev, b), sizeof(s->s_id)); + sb_set_blocksize(s, block_size(bdev)); + error = fill_super(s, data, flags & MS_SILENT ? 
1 : 0); + if (error) { + deactivate_locked_super(s); + goto error; + } + + s->s_flags |= MS_ACTIVE; + bdev->bd_super = s; + } + + simple_set_mnt(mnt, s); + return 0; + +error_s: + error = PTR_ERR(s); +error_bdev: + close_bdev_exclusive(bdev, mode); +error: + return error; +} + +EXPORT_SYMBOL(get_sb_bdev); + +void kill_block_super(struct super_block *sb) +{ + struct block_device *bdev = sb->s_bdev; + fmode_t mode = sb->s_mode; + + bdev->bd_super = NULL; + generic_shutdown_super(sb); + sync_blockdev(bdev); + close_bdev_exclusive(bdev, mode); +} + +EXPORT_SYMBOL(kill_block_super); +#endif + +int get_sb_nodev(struct file_system_type *fs_type, + int flags, void *data, + int (*fill_super)(struct super_block *, void *, int), + struct vfsmount *mnt) +{ + int error; + struct super_block *s = sget(fs_type, NULL, set_anon_super, NULL); + + if (IS_ERR(s)) + return PTR_ERR(s); + + s->s_flags = flags; + + error = fill_super(s, data, flags & MS_SILENT ? 1 : 0); + if (error) { + deactivate_locked_super(s); + return error; + } + s->s_flags |= MS_ACTIVE; + simple_set_mnt(mnt, s); + return 0; +} + +EXPORT_SYMBOL(get_sb_nodev); + +static int compare_single(struct super_block *s, void *p) +{ + return 1; +} + +int get_sb_single(struct file_system_type *fs_type, + int flags, void *data, + int (*fill_super)(struct super_block *, void *, int), + struct vfsmount *mnt) +{ + struct super_block *s; + int error; + + s = sget(fs_type, compare_single, set_anon_super, NULL); + if (IS_ERR(s)) + return PTR_ERR(s); + if (!s->s_root) { + s->s_flags = flags; + error = fill_super(s, data, flags & MS_SILENT ? 1 : 0); + if (error) { + deactivate_locked_super(s); + return error; + } + s->s_flags |= MS_ACTIVE; + } else { + do_remount_sb(s, flags, data, 0); + } + simple_set_mnt(mnt, s); + return 0; +} + +EXPORT_SYMBOL(get_sb_single); + +struct vfsmount * +vfs_kern_mount(struct file_system_type *type, int flags, const char *name, void *data) +{ + struct vfsmount *mnt; + char *secdata = NULL; + int error; + + if (!type) + return ERR_PTR(-ENODEV); + + error = -ENOMEM; + mnt = alloc_vfsmnt(name); + if (!mnt) + goto out; + + if (data && !(type->fs_flags & FS_BINARY_MOUNTDATA)) { + secdata = alloc_secdata(); + if (!secdata) + goto out_mnt; + + error = security_sb_copy_data(data, secdata); + if (error) + goto out_free_secdata; + } + + error = type->get_sb(type, flags, name, data, mnt); + if (error < 0) + goto out_free_secdata; + BUG_ON(!mnt->mnt_sb); + + error = security_sb_kern_mount(mnt->mnt_sb, flags, secdata); + if (error) + goto out_sb; + + /* + * filesystems should never set s_maxbytes larger than MAX_LFS_FILESIZE + * but s_maxbytes was an unsigned long long for many releases. Throw + * this warning for a little while to try and catch filesystems that + * violate this rule. This warning should be either removed or + * converted to a BUG() in 2.6.34. 
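+ * (s_maxbytes is loff_t, so the historical habit of storing ~0ULL shows
+ * up as a negative value, which is exactly what the WARN below catches.)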
+ */ + WARN((mnt->mnt_sb->s_maxbytes < 0), "%s set sb->s_maxbytes to " + "negative value (%lld)\n", type->name, mnt->mnt_sb->s_maxbytes); + + mnt->mnt_mountpoint = mnt->mnt_root; + mnt->mnt_parent = mnt; + up_write(&mnt->mnt_sb->s_umount); + free_secdata(secdata); + return mnt; +out_sb: + dput(mnt->mnt_root); + deactivate_locked_super(mnt->mnt_sb); +out_free_secdata: + free_secdata(secdata); +out_mnt: + free_vfsmnt(mnt); +out: + return ERR_PTR(error); +} + +EXPORT_SYMBOL_GPL(vfs_kern_mount); + +static struct vfsmount *fs_set_subtype(struct vfsmount *mnt, const char *fstype) +{ + int err; + const char *subtype = strchr(fstype, '.'); + if (subtype) { + subtype++; + err = -EINVAL; + if (!subtype[0]) + goto err; + } else + subtype = ""; + + mnt->mnt_sb->s_subtype = kstrdup(subtype, GFP_KERNEL); + err = -ENOMEM; + if (!mnt->mnt_sb->s_subtype) + goto err; + return mnt; + + err: + mntput(mnt); + return ERR_PTR(err); +} + +struct vfsmount * +do_kern_mount(const char *fstype, int flags, const char *name, void *data) +{ + struct file_system_type *type = get_fs_type(fstype); + struct vfsmount *mnt; + if (!type) + return ERR_PTR(-ENODEV); + mnt = vfs_kern_mount(type, flags, name, data); + if (!IS_ERR(mnt) && (type->fs_flags & FS_HAS_SUBTYPE) && + !mnt->mnt_sb->s_subtype) + mnt = fs_set_subtype(mnt, fstype); + put_filesystem(type); + return mnt; +} +EXPORT_SYMBOL_GPL(do_kern_mount); + +struct vfsmount *kern_mount_data(struct file_system_type *type, void *data) +{ + return vfs_kern_mount(type, MS_KERNMOUNT, type->name, data); +} + +EXPORT_SYMBOL_GPL(kern_mount_data); diff -Nur kernel-2.6.32.54/linux-2.6.32/fs/sysfs/mount.c kernel-2.6.32.54.vs/linux-2.6.32/fs/sysfs/mount.c --- kernel-2.6.32.54/linux-2.6.32/fs/sysfs/mount.c 2009-12-03 04:51:21.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/fs/sysfs/mount.c 2012-01-16 14:51:21.897408771 +0100 @@ -47,7 +47,7 @@ sb->s_blocksize = PAGE_CACHE_SIZE; sb->s_blocksize_bits = PAGE_CACHE_SHIFT; - sb->s_magic = SYSFS_MAGIC; + sb->s_magic = SYSFS_SUPER_MAGIC; sb->s_op = &sysfs_ops; sb->s_time_gran = 1; sysfs_sb = sb; diff -Nur kernel-2.6.32.54/linux-2.6.32/fs/utimes.c kernel-2.6.32.54.vs/linux-2.6.32/fs/utimes.c --- kernel-2.6.32.54/linux-2.6.32/fs/utimes.c 2009-12-03 04:51:21.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/fs/utimes.c 2012-01-16 14:51:21.897408771 +0100 @@ -8,6 +8,8 @@ #include #include #include +#include +#include #include #include diff -Nur kernel-2.6.32.54/linux-2.6.32/fs/xattr.c kernel-2.6.32.54.vs/linux-2.6.32/fs/xattr.c --- kernel-2.6.32.54/linux-2.6.32/fs/xattr.c 2009-12-03 04:51:21.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/fs/xattr.c 2012-01-16 14:51:21.901408757 +0100 @@ -18,6 +18,7 @@ #include #include #include +#include #include @@ -49,7 +50,7 @@ * The trusted.* namespace can only be accessed by a privileged user. */ if (!strncmp(name, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN)) - return (capable(CAP_SYS_ADMIN) ? 0 : -EPERM); + return (vx_capable(CAP_SYS_ADMIN, VXC_FS_TRUSTED) ? 0 : -EPERM); /* In user.* namespace, only regular files and directories can have * extended attributes. 
For sticky directories, only the owner and diff -Nur kernel-2.6.32.54/linux-2.6.32/fs/xfs/linux-2.6/xfs_ioctl.c kernel-2.6.32.54.vs/linux-2.6.32/fs/xfs/linux-2.6/xfs_ioctl.c --- kernel-2.6.32.54/linux-2.6.32/fs/xfs/linux-2.6/xfs_ioctl.c 2012-01-16 15:01:39.672726283 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/fs/xfs/linux-2.6/xfs_ioctl.c 2012-01-16 14:51:21.901408757 +0100 @@ -34,7 +34,6 @@ #include "xfs_dir2_sf.h" #include "xfs_dinode.h" #include "xfs_inode.h" -#include "xfs_ioctl.h" #include "xfs_btree.h" #include "xfs_ialloc.h" #include "xfs_rtalloc.h" @@ -746,6 +745,10 @@ xflags |= XFS_XFLAG_IMMUTABLE; else xflags &= ~XFS_XFLAG_IMMUTABLE; + if (flags & FS_IXUNLINK_FL) + xflags |= XFS_XFLAG_IXUNLINK; + else + xflags &= ~XFS_XFLAG_IXUNLINK; if (flags & FS_APPEND_FL) xflags |= XFS_XFLAG_APPEND; else @@ -774,6 +777,8 @@ if (di_flags & XFS_DIFLAG_IMMUTABLE) flags |= FS_IMMUTABLE_FL; + if (di_flags & XFS_DIFLAG_IXUNLINK) + flags |= FS_IXUNLINK_FL; if (di_flags & XFS_DIFLAG_APPEND) flags |= FS_APPEND_FL; if (di_flags & XFS_DIFLAG_SYNC) @@ -834,6 +839,8 @@ di_flags = (ip->i_d.di_flags & XFS_DIFLAG_PREALLOC); if (xflags & XFS_XFLAG_IMMUTABLE) di_flags |= XFS_DIFLAG_IMMUTABLE; + if (xflags & XFS_XFLAG_IXUNLINK) + di_flags |= XFS_DIFLAG_IXUNLINK; if (xflags & XFS_XFLAG_APPEND) di_flags |= XFS_DIFLAG_APPEND; if (xflags & XFS_XFLAG_SYNC) @@ -876,6 +883,10 @@ inode->i_flags |= S_IMMUTABLE; else inode->i_flags &= ~S_IMMUTABLE; + if (xflags & XFS_XFLAG_IXUNLINK) + inode->i_flags |= S_IXUNLINK; + else + inode->i_flags &= ~S_IXUNLINK; if (xflags & XFS_XFLAG_APPEND) inode->i_flags |= S_APPEND; else @@ -1352,10 +1363,18 @@ case XFS_IOC_FSGETXATTRA: return xfs_ioc_fsgetxattr(ip, 1, arg); case XFS_IOC_FSSETXATTR: + if (IS_BARRIER(inode)) { + vxwprintk_task(1, "messing with the barrier."); + return -XFS_ERROR(EACCES); + } return xfs_ioc_fssetxattr(ip, filp, arg); case XFS_IOC_GETXFLAGS: return xfs_ioc_getxflags(ip, arg); case XFS_IOC_SETXFLAGS: + if (IS_BARRIER(inode)) { + vxwprintk_task(1, "messing with the barrier."); + return -XFS_ERROR(EACCES); + } return xfs_ioc_setxflags(ip, filp, arg); case XFS_IOC_FSSETDM: { diff -Nur kernel-2.6.32.54/linux-2.6.32/fs/xfs/linux-2.6/xfs_ioctl.h kernel-2.6.32.54.vs/linux-2.6.32/fs/xfs/linux-2.6/xfs_ioctl.h --- kernel-2.6.32.54/linux-2.6.32/fs/xfs/linux-2.6/xfs_ioctl.h 2009-12-03 04:51:21.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/fs/xfs/linux-2.6/xfs_ioctl.h 2012-01-16 14:51:21.901408757 +0100 @@ -70,6 +70,12 @@ void __user *uhandle, u32 hlen); +extern int +xfs_sync_flags( + struct inode *inode, + int flags, + int vflags); + extern long xfs_file_ioctl( struct file *filp, diff -Nur kernel-2.6.32.54/linux-2.6.32/fs/xfs/linux-2.6/xfs_iops.c kernel-2.6.32.54.vs/linux-2.6.32/fs/xfs/linux-2.6/xfs_iops.c --- kernel-2.6.32.54/linux-2.6.32/fs/xfs/linux-2.6/xfs_iops.c 2012-01-16 15:01:39.672726283 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/fs/xfs/linux-2.6/xfs_iops.c 2012-01-16 14:51:21.901408757 +0100 @@ -36,6 +36,7 @@ #include "xfs_attr_sf.h" #include "xfs_dinode.h" #include "xfs_inode.h" +#include "xfs_ioctl.h" #include "xfs_bmap.h" #include "xfs_btree.h" #include "xfs_ialloc.h" @@ -55,6 +56,7 @@ #include #include #include +#include /* * Bring the timestamps in the XFS inode uptodate. 
@@ -495,6 +497,7 @@ stat->nlink = ip->i_d.di_nlink; stat->uid = ip->i_d.di_uid; stat->gid = ip->i_d.di_gid; + stat->tag = ip->i_d.di_tag; stat->ino = ip->i_ino; stat->atime = inode->i_atime; stat->mtime = inode->i_mtime; @@ -686,6 +689,7 @@ .listxattr = xfs_vn_listxattr, .fallocate = xfs_vn_fallocate, .fiemap = xfs_vn_fiemap, + .sync_flags = xfs_sync_flags, }; static const struct inode_operations xfs_dir_inode_operations = { @@ -711,6 +715,7 @@ .getxattr = generic_getxattr, .removexattr = generic_removexattr, .listxattr = xfs_vn_listxattr, + .sync_flags = xfs_sync_flags, }; static const struct inode_operations xfs_dir_ci_inode_operations = { @@ -760,6 +765,10 @@ inode->i_flags |= S_IMMUTABLE; else inode->i_flags &= ~S_IMMUTABLE; + if (ip->i_d.di_flags & XFS_DIFLAG_IXUNLINK) + inode->i_flags |= S_IXUNLINK; + else + inode->i_flags &= ~S_IXUNLINK; if (ip->i_d.di_flags & XFS_DIFLAG_APPEND) inode->i_flags |= S_APPEND; else @@ -772,6 +781,15 @@ inode->i_flags |= S_NOATIME; else inode->i_flags &= ~S_NOATIME; + + if (ip->i_d.di_vflags & XFS_DIVFLAG_BARRIER) + inode->i_vflags |= V_BARRIER; + else + inode->i_vflags &= ~V_BARRIER; + if (ip->i_d.di_vflags & XFS_DIVFLAG_COW) + inode->i_vflags |= V_COW; + else + inode->i_vflags &= ~V_COW; } /* @@ -800,6 +818,7 @@ inode->i_nlink = ip->i_d.di_nlink; inode->i_uid = ip->i_d.di_uid; inode->i_gid = ip->i_d.di_gid; + inode->i_tag = ip->i_d.di_tag; switch (inode->i_mode & S_IFMT) { case S_IFBLK: diff -Nur kernel-2.6.32.54/linux-2.6.32/fs/xfs/linux-2.6/xfs_linux.h kernel-2.6.32.54.vs/linux-2.6.32/fs/xfs/linux-2.6/xfs_linux.h --- kernel-2.6.32.54/linux-2.6.32/fs/xfs/linux-2.6/xfs_linux.h 2009-12-03 04:51:21.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/fs/xfs/linux-2.6/xfs_linux.h 2012-01-16 14:51:21.901408757 +0100 @@ -119,6 +119,7 @@ #define current_cpu() (raw_smp_processor_id()) #define current_pid() (current->pid) +#define current_fstag(cred,vp) (dx_current_fstag((vp)->i_sb)) #define current_test_flags(f) (current->flags & (f)) #define current_set_flags_nested(sp, f) \ (*(sp) = current->flags, current->flags |= (f)) diff -Nur kernel-2.6.32.54/linux-2.6.32/fs/xfs/linux-2.6/xfs_super.c kernel-2.6.32.54.vs/linux-2.6.32/fs/xfs/linux-2.6/xfs_super.c --- kernel-2.6.32.54/linux-2.6.32/fs/xfs/linux-2.6/xfs_super.c 2012-01-16 15:01:39.676726269 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/fs/xfs/linux-2.6/xfs_super.c 2012-01-16 14:51:21.905408743 +0100 @@ -117,6 +117,9 @@ #define MNTOPT_DMAPI "dmapi" /* DMI enabled (DMAPI / XDSM) */ #define MNTOPT_XDSM "xdsm" /* DMI enabled (DMAPI / XDSM) */ #define MNTOPT_DMI "dmi" /* DMI enabled (DMAPI / XDSM) */ +#define MNTOPT_TAGXID "tagxid" /* context tagging for inodes */ +#define MNTOPT_TAGGED "tag" /* context tagging for inodes */ +#define MNTOPT_NOTAGTAG "notag" /* do not use context tagging */ /* * Table driven mount option parser. @@ -125,10 +128,14 @@ * in the future, too. 
*/ enum { + Opt_tag, Opt_notag, Opt_barrier, Opt_nobarrier, Opt_err }; static const match_table_t tokens = { + {Opt_tag, "tagxid"}, + {Opt_tag, "tag"}, + {Opt_notag, "notag"}, {Opt_barrier, "barrier"}, {Opt_nobarrier, "nobarrier"}, {Opt_err, NULL} @@ -382,6 +389,19 @@ } else if (!strcmp(this_char, "irixsgid")) { cmn_err(CE_WARN, "XFS: irixsgid is now a sysctl(2) variable, option is deprecated."); +#ifndef CONFIG_TAGGING_NONE + } else if (!strcmp(this_char, MNTOPT_TAGGED)) { + mp->m_flags |= XFS_MOUNT_TAGGED; + } else if (!strcmp(this_char, MNTOPT_NOTAGTAG)) { + mp->m_flags &= ~XFS_MOUNT_TAGGED; + } else if (!strcmp(this_char, MNTOPT_TAGXID)) { + mp->m_flags |= XFS_MOUNT_TAGGED; +#endif +#ifdef CONFIG_PROPAGATE + } else if (!strcmp(this_char, MNTOPT_TAGGED)) { + /* use value */ + mp->m_flags |= XFS_MOUNT_TAGGED; +#endif } else { cmn_err(CE_WARN, "XFS: unknown mount option [%s].", this_char); @@ -1295,6 +1315,16 @@ case Opt_nobarrier: mp->m_flags &= ~XFS_MOUNT_BARRIER; break; + case Opt_tag: + if (!(sb->s_flags & MS_TAGGED)) { + printk(KERN_INFO + "XFS: %s: tagging not permitted on remount.\n", + sb->s_id); + return -EINVAL; + } + break; + case Opt_notag: + break; default: /* * Logically we would return an error here to prevent @@ -1530,6 +1560,9 @@ XFS_SEND_MOUNT(mp, DM_RIGHT_NULL, mtpt, mp->m_fsname); + if (mp->m_flags & XFS_MOUNT_TAGGED) + sb->s_flags |= MS_TAGGED; + sb->s_magic = XFS_SB_MAGIC; sb->s_blocksize = mp->m_sb.sb_blocksize; sb->s_blocksize_bits = ffs(sb->s_blocksize) - 1; diff -Nur kernel-2.6.32.54/linux-2.6.32/fs/xfs/xfs_dinode.h kernel-2.6.32.54.vs/linux-2.6.32/fs/xfs/xfs_dinode.h --- kernel-2.6.32.54/linux-2.6.32/fs/xfs/xfs_dinode.h 2009-12-03 04:51:21.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/fs/xfs/xfs_dinode.h 2012-01-16 14:51:21.905408743 +0100 @@ -50,7 +50,9 @@ __be32 di_gid; /* owner's group id */ __be32 di_nlink; /* number of links to file */ __be16 di_projid; /* owner's project id */ - __u8 di_pad[8]; /* unused, zeroed space */ + __be16 di_tag; /* context tagging */ + __be16 di_vflags; /* vserver specific flags */ + __u8 di_pad[4]; /* unused, zeroed space */ __be16 di_flushiter; /* incremented on flush */ xfs_timestamp_t di_atime; /* time last accessed */ xfs_timestamp_t di_mtime; /* time last modified */ @@ -183,6 +185,8 @@ #define XFS_DIFLAG_EXTSZINHERIT_BIT 12 /* inherit inode extent size */ #define XFS_DIFLAG_NODEFRAG_BIT 13 /* do not reorganize/defragment */ #define XFS_DIFLAG_FILESTREAM_BIT 14 /* use filestream allocator */ +#define XFS_DIFLAG_IXUNLINK_BIT 15 /* Immutable inver on unlink */ + #define XFS_DIFLAG_REALTIME (1 << XFS_DIFLAG_REALTIME_BIT) #define XFS_DIFLAG_PREALLOC (1 << XFS_DIFLAG_PREALLOC_BIT) #define XFS_DIFLAG_NEWRTBM (1 << XFS_DIFLAG_NEWRTBM_BIT) @@ -198,6 +202,7 @@ #define XFS_DIFLAG_EXTSZINHERIT (1 << XFS_DIFLAG_EXTSZINHERIT_BIT) #define XFS_DIFLAG_NODEFRAG (1 << XFS_DIFLAG_NODEFRAG_BIT) #define XFS_DIFLAG_FILESTREAM (1 << XFS_DIFLAG_FILESTREAM_BIT) +#define XFS_DIFLAG_IXUNLINK (1 << XFS_DIFLAG_IXUNLINK_BIT) #ifdef CONFIG_XFS_RT #define XFS_IS_REALTIME_INODE(ip) ((ip)->i_d.di_flags & XFS_DIFLAG_REALTIME) @@ -210,6 +215,10 @@ XFS_DIFLAG_IMMUTABLE | XFS_DIFLAG_APPEND | XFS_DIFLAG_SYNC | \ XFS_DIFLAG_NOATIME | XFS_DIFLAG_NODUMP | XFS_DIFLAG_RTINHERIT | \ XFS_DIFLAG_PROJINHERIT | XFS_DIFLAG_NOSYMLINKS | XFS_DIFLAG_EXTSIZE | \ - XFS_DIFLAG_EXTSZINHERIT | XFS_DIFLAG_NODEFRAG | XFS_DIFLAG_FILESTREAM) + XFS_DIFLAG_EXTSZINHERIT | XFS_DIFLAG_NODEFRAG | XFS_DIFLAG_FILESTREAM | \ + XFS_DIFLAG_IXUNLINK) + +#define XFS_DIVFLAG_BARRIER 0x01 
+#define XFS_DIVFLAG_COW 0x02 #endif /* __XFS_DINODE_H__ */ diff -Nur kernel-2.6.32.54/linux-2.6.32/fs/xfs/xfs_fs.h kernel-2.6.32.54.vs/linux-2.6.32/fs/xfs/xfs_fs.h --- kernel-2.6.32.54/linux-2.6.32/fs/xfs/xfs_fs.h 2009-12-03 04:51:21.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/fs/xfs/xfs_fs.h 2012-01-16 14:51:21.905408743 +0100 @@ -67,6 +67,9 @@ #define XFS_XFLAG_EXTSZINHERIT 0x00001000 /* inherit inode extent size */ #define XFS_XFLAG_NODEFRAG 0x00002000 /* do not defragment */ #define XFS_XFLAG_FILESTREAM 0x00004000 /* use filestream allocator */ +#define XFS_XFLAG_IXUNLINK 0x00008000 /* immutable invert on unlink */ +#define XFS_XFLAG_BARRIER 0x10000000 /* chroot() barrier */ +#define XFS_XFLAG_COW 0x20000000 /* copy on write mark */ #define XFS_XFLAG_HASATTR 0x80000000 /* no DIFLAG for this */ /* @@ -292,7 +295,8 @@ __s32 bs_extents; /* number of extents */ __u32 bs_gen; /* generation count */ __u16 bs_projid; /* project id */ - unsigned char bs_pad[14]; /* pad space, unused */ + __u16 bs_tag; /* context tagging */ + unsigned char bs_pad[12]; /* pad space, unused */ __u32 bs_dmevmask; /* DMIG event mask */ __u16 bs_dmstate; /* DMIG state info */ __u16 bs_aextents; /* attribute number of extents */ diff -Nur kernel-2.6.32.54/linux-2.6.32/fs/xfs/xfs_ialloc.c kernel-2.6.32.54.vs/linux-2.6.32/fs/xfs/xfs_ialloc.c --- kernel-2.6.32.54/linux-2.6.32/fs/xfs/xfs_ialloc.c 2012-01-16 15:01:39.688726227 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/fs/xfs/xfs_ialloc.c 2012-01-16 14:51:21.905408743 +0100 @@ -41,7 +41,6 @@ #include "xfs_error.h" #include "xfs_bmap.h" - /* * Allocation group level functions. */ diff -Nur kernel-2.6.32.54/linux-2.6.32/fs/xfs/xfs_inode.c kernel-2.6.32.54.vs/linux-2.6.32/fs/xfs/xfs_inode.c --- kernel-2.6.32.54/linux-2.6.32/fs/xfs/xfs_inode.c 2012-01-16 15:01:39.732726071 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/fs/xfs/xfs_inode.c 2012-01-16 14:51:21.909408729 +0100 @@ -249,6 +249,7 @@ return 0; } +#include /* * This routine is called to map an inode to the buffer containing @@ -654,15 +655,25 @@ STATIC void xfs_dinode_from_disk( xfs_icdinode_t *to, - xfs_dinode_t *from) + xfs_dinode_t *from, + int tagged) { + uint32_t uid, gid, tag; + to->di_magic = be16_to_cpu(from->di_magic); to->di_mode = be16_to_cpu(from->di_mode); to->di_version = from ->di_version; to->di_format = from->di_format; to->di_onlink = be16_to_cpu(from->di_onlink); - to->di_uid = be32_to_cpu(from->di_uid); - to->di_gid = be32_to_cpu(from->di_gid); + + uid = be32_to_cpu(from->di_uid); + gid = be32_to_cpu(from->di_gid); + tag = be16_to_cpu(from->di_tag); + + to->di_uid = INOTAG_UID(tagged, uid, gid); + to->di_gid = INOTAG_GID(tagged, uid, gid); + to->di_tag = INOTAG_TAG(tagged, uid, gid, tag); + to->di_nlink = be32_to_cpu(from->di_nlink); to->di_projid = be16_to_cpu(from->di_projid); memcpy(to->di_pad, from->di_pad, sizeof(to->di_pad)); @@ -683,21 +694,26 @@ to->di_dmevmask = be32_to_cpu(from->di_dmevmask); to->di_dmstate = be16_to_cpu(from->di_dmstate); to->di_flags = be16_to_cpu(from->di_flags); + to->di_vflags = be16_to_cpu(from->di_vflags); to->di_gen = be32_to_cpu(from->di_gen); } void xfs_dinode_to_disk( xfs_dinode_t *to, - xfs_icdinode_t *from) + xfs_icdinode_t *from, + int tagged) { to->di_magic = cpu_to_be16(from->di_magic); to->di_mode = cpu_to_be16(from->di_mode); to->di_version = from ->di_version; to->di_format = from->di_format; to->di_onlink = cpu_to_be16(from->di_onlink); - to->di_uid = cpu_to_be32(from->di_uid); - to->di_gid = cpu_to_be32(from->di_gid); + + to->di_uid = 
cpu_to_be32(TAGINO_UID(tagged, from->di_uid, from->di_tag)); + to->di_gid = cpu_to_be32(TAGINO_GID(tagged, from->di_gid, from->di_tag)); + to->di_tag = cpu_to_be16(TAGINO_TAG(tagged, from->di_tag)); + to->di_nlink = cpu_to_be32(from->di_nlink); to->di_projid = cpu_to_be16(from->di_projid); memcpy(to->di_pad, from->di_pad, sizeof(to->di_pad)); @@ -718,12 +734,14 @@ to->di_dmevmask = cpu_to_be32(from->di_dmevmask); to->di_dmstate = cpu_to_be16(from->di_dmstate); to->di_flags = cpu_to_be16(from->di_flags); + to->di_vflags = cpu_to_be16(from->di_vflags); to->di_gen = cpu_to_be32(from->di_gen); } STATIC uint _xfs_dic2xflags( - __uint16_t di_flags) + __uint16_t di_flags, + __uint16_t di_vflags) { uint flags = 0; @@ -734,6 +752,8 @@ flags |= XFS_XFLAG_PREALLOC; if (di_flags & XFS_DIFLAG_IMMUTABLE) flags |= XFS_XFLAG_IMMUTABLE; + if (di_flags & XFS_DIFLAG_IXUNLINK) + flags |= XFS_XFLAG_IXUNLINK; if (di_flags & XFS_DIFLAG_APPEND) flags |= XFS_XFLAG_APPEND; if (di_flags & XFS_DIFLAG_SYNC) @@ -758,6 +778,10 @@ flags |= XFS_XFLAG_FILESTREAM; } + if (di_vflags & XFS_DIVFLAG_BARRIER) + flags |= FS_BARRIER_FL; + if (di_vflags & XFS_DIVFLAG_COW) + flags |= FS_COW_FL; return flags; } @@ -767,7 +791,7 @@ { xfs_icdinode_t *dic = &ip->i_d; - return _xfs_dic2xflags(dic->di_flags) | + return _xfs_dic2xflags(dic->di_flags, dic->di_vflags) | (XFS_IFORK_Q(ip) ? XFS_XFLAG_HASATTR : 0); } @@ -775,7 +799,8 @@ xfs_dic2xflags( xfs_dinode_t *dip) { - return _xfs_dic2xflags(be16_to_cpu(dip->di_flags)) | + return _xfs_dic2xflags(be16_to_cpu(dip->di_flags), + be16_to_cpu(dip->di_vflags)) | (XFS_DFORK_Q(dip) ? XFS_XFLAG_HASATTR : 0); } @@ -808,7 +833,6 @@ if (error) return error; dip = (xfs_dinode_t *)xfs_buf_offset(bp, ip->i_imap.im_boffset); - /* * If we got something that isn't an inode it means someone * (nfs or dmi) has a stale handle. @@ -833,7 +857,8 @@ * Otherwise, just get the truly permanent information. */ if (dip->di_mode) { - xfs_dinode_from_disk(&ip->i_d, dip); + xfs_dinode_from_disk(&ip->i_d, dip, + mp->m_flags & XFS_MOUNT_TAGGED); error = xfs_iformat(ip, dip); if (error) { #ifdef DEBUG @@ -1033,6 +1058,7 @@ ASSERT(ip->i_d.di_nlink == nlink); ip->i_d.di_uid = current_fsuid(); ip->i_d.di_gid = current_fsgid(); + ip->i_d.di_tag = current_fstag(cr, &ip->i_vnode); ip->i_d.di_projid = prid; memset(&(ip->i_d.di_pad[0]), 0, sizeof(ip->i_d.di_pad)); @@ -1093,6 +1119,7 @@ ip->i_d.di_dmevmask = 0; ip->i_d.di_dmstate = 0; ip->i_d.di_flags = 0; + ip->i_d.di_vflags = 0; flags = XFS_ILOG_CORE; switch (mode & S_IFMT) { case S_IFIFO: @@ -2169,6 +2196,7 @@ } ip->i_d.di_mode = 0; /* mark incore inode as free */ ip->i_d.di_flags = 0; + ip->i_d.di_vflags = 0; ip->i_d.di_dmevmask = 0; ip->i_d.di_forkoff = 0; /* mark the attr fork not in use */ ip->i_df.if_ext_max = @@ -3149,7 +3177,8 @@ * because if the inode is dirty at all the core must * be. 
*/ - xfs_dinode_to_disk(dip, &ip->i_d); + xfs_dinode_to_disk(dip, &ip->i_d, + mp->m_flags & XFS_MOUNT_TAGGED); /* Wrap, we never let the log put out DI_MAX_FLUSH */ if (ip->i_d.di_flushiter == DI_MAX_FLUSH) diff -Nur kernel-2.6.32.54/linux-2.6.32/fs/xfs/xfs_inode.h kernel-2.6.32.54.vs/linux-2.6.32/fs/xfs/xfs_inode.h --- kernel-2.6.32.54/linux-2.6.32/fs/xfs/xfs_inode.h 2012-01-16 15:01:39.752726000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/fs/xfs/xfs_inode.h 2012-01-16 14:51:21.913408715 +0100 @@ -135,7 +135,9 @@ __uint32_t di_gid; /* owner's group id */ __uint32_t di_nlink; /* number of links to file */ __uint16_t di_projid; /* owner's project id */ - __uint8_t di_pad[8]; /* unused, zeroed space */ + __uint16_t di_tag; /* context tagging */ + __uint16_t di_vflags; /* vserver specific flags */ + __uint8_t di_pad[4]; /* unused, zeroed space */ __uint16_t di_flushiter; /* incremented on flush */ xfs_ictimestamp_t di_atime; /* time last accessed */ xfs_ictimestamp_t di_mtime; /* time last modified */ @@ -569,7 +571,7 @@ int xfs_iread(struct xfs_mount *, struct xfs_trans *, struct xfs_inode *, uint); void xfs_dinode_to_disk(struct xfs_dinode *, - struct xfs_icdinode *); + struct xfs_icdinode *, int); void xfs_idestroy_fork(struct xfs_inode *, int); void xfs_idata_realloc(struct xfs_inode *, int, int); void xfs_iroot_realloc(struct xfs_inode *, int, int); diff -Nur kernel-2.6.32.54/linux-2.6.32/fs/xfs/xfs_itable.c kernel-2.6.32.54.vs/linux-2.6.32/fs/xfs/xfs_itable.c --- kernel-2.6.32.54/linux-2.6.32/fs/xfs/xfs_itable.c 2012-01-16 15:01:39.752726000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/fs/xfs/xfs_itable.c 2012-01-16 14:51:21.913408715 +0100 @@ -100,6 +100,7 @@ buf->bs_mode = dic->di_mode; buf->bs_uid = dic->di_uid; buf->bs_gid = dic->di_gid; + buf->bs_tag = dic->di_tag; buf->bs_size = dic->di_size; /* diff -Nur kernel-2.6.32.54/linux-2.6.32/fs/xfs/xfs_log_recover.c kernel-2.6.32.54.vs/linux-2.6.32/fs/xfs/xfs_log_recover.c --- kernel-2.6.32.54/linux-2.6.32/fs/xfs/xfs_log_recover.c 2012-01-16 15:01:39.752726000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/fs/xfs/xfs_log_recover.c 2012-01-16 14:51:21.913408715 +0100 @@ -2467,7 +2467,8 @@ } /* The core is in in-core format */ - xfs_dinode_to_disk(dip, (xfs_icdinode_t *)item->ri_buf[1].i_addr); + xfs_dinode_to_disk(dip, (xfs_icdinode_t *)item->ri_buf[1].i_addr, + mp->m_flags & XFS_MOUNT_TAGGED); /* the rest is in on-disk format */ if (item->ri_buf[1].i_len > sizeof(struct xfs_icdinode)) { diff -Nur kernel-2.6.32.54/linux-2.6.32/fs/xfs/xfs_mount.h kernel-2.6.32.54.vs/linux-2.6.32/fs/xfs/xfs_mount.h --- kernel-2.6.32.54/linux-2.6.32/fs/xfs/xfs_mount.h 2012-01-16 15:01:39.752726000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/fs/xfs/xfs_mount.h 2012-01-16 14:51:21.913408715 +0100 @@ -285,6 +285,7 @@ allocator */ #define XFS_MOUNT_NOATTR2 (1ULL << 25) /* disable use of attr2 format */ +#define XFS_MOUNT_TAGGED (1ULL << 31) /* context tagging */ /* * Default minimum read and write sizes. 
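A note on the tag packing used throughout the xfs_inode.c hunks above: INOTAG_UID/INOTAG_GID/INOTAG_TAG and TAGINO_UID/TAGINO_GID/TAGINO_TAG are not defined in this diff; they come from the vserver tagging headers (the bare "+#include" added before the inode mapping routines lost its header name in extraction). The sketch below illustrates the round-trip these helpers perform, assuming the simplest layout, a 16-bit tag folded into the high half of the on-disk gid; the real macros select among several CONFIG_TAGGING_* layouts and may draw on uid bits as well:

#include <stdint.h>

/* Sketch only -- not the kernel's macros.  Disk -> core: recover the
 * plain gid and the context tag from the packed on-disk gid.  The uid
 * parameter is unused in this simplified layout but kept for parity
 * with the INOTAG_*() signatures. */
static inline uint32_t inotag_gid(int tagged, uint32_t uid, uint32_t gid)
{
	return tagged ? (gid & 0xffff) : gid;
}

static inline uint16_t inotag_tag(int tagged, uint32_t uid, uint32_t gid,
				  uint16_t tag)
{
	return tagged ? (uint16_t)(gid >> 16) : tag;
}

/* Core -> disk: fold the tag back into the high half of the gid; with
 * tagging off the gid passes through untouched. */
static inline uint32_t tagino_gid(int tagged, uint32_t gid, uint16_t tag)
{
	return tagged ? ((gid & 0xffff) | ((uint32_t)tag << 16)) : gid;
}

Under this assumed layout, xfs_dinode_to_disk() followed by xfs_dinode_from_disk() round-trips (gid, tag) exactly when XFS_MOUNT_TAGGED is applied consistently on both paths.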
diff -Nur kernel-2.6.32.54/linux-2.6.32/fs/xfs/xfs_vnodeops.c kernel-2.6.32.54.vs/linux-2.6.32/fs/xfs/xfs_vnodeops.c
--- kernel-2.6.32.54/linux-2.6.32/fs/xfs/xfs_vnodeops.c	2012-01-16 15:01:39.764725958 +0100
+++ kernel-2.6.32.54.vs/linux-2.6.32/fs/xfs/xfs_vnodeops.c	2012-01-16 14:51:21.917408701 +0100
@@ -54,6 +54,80 @@
 #include "xfs_filestream.h"
 #include "xfs_vnodeops.h"

+
+STATIC void
+xfs_get_inode_flags(
+	xfs_inode_t	*ip)
+{
+	struct inode	*inode = VFS_I(ip);
+	unsigned int	flags = inode->i_flags;
+	unsigned int	vflags = inode->i_vflags;
+
+	if (flags & S_IMMUTABLE)
+		ip->i_d.di_flags |= XFS_DIFLAG_IMMUTABLE;
+	else
+		ip->i_d.di_flags &= ~XFS_DIFLAG_IMMUTABLE;
+	if (flags & S_IXUNLINK)
+		ip->i_d.di_flags |= XFS_DIFLAG_IXUNLINK;
+	else
+		ip->i_d.di_flags &= ~XFS_DIFLAG_IXUNLINK;
+
+	if (vflags & V_BARRIER)
+		ip->i_d.di_vflags |= XFS_DIVFLAG_BARRIER;
+	else
+		ip->i_d.di_vflags &= ~XFS_DIVFLAG_BARRIER;
+	if (vflags & V_COW)
+		ip->i_d.di_vflags |= XFS_DIVFLAG_COW;
+	else
+		ip->i_d.di_vflags &= ~XFS_DIVFLAG_COW;
+}
+
+int
+xfs_sync_flags(
+	struct inode		*inode,
+	int			flags,
+	int			vflags)
+{
+	struct xfs_inode	*ip = XFS_I(inode);
+	struct xfs_mount	*mp = ip->i_mount;
+	struct xfs_trans	*tp;
+	unsigned int		lock_flags = 0;
+	int			code;
+
+	tp = xfs_trans_alloc(mp, XFS_TRANS_SETATTR_NOT_SIZE);
+	code = xfs_trans_reserve(tp, 0, XFS_ICHANGE_LOG_RES(mp), 0, 0, 0);
+	if (code)
+		goto error_out;
+
+	lock_flags = XFS_ILOCK_EXCL;
+	xfs_ilock(ip, lock_flags);
+
+	xfs_trans_ijoin(tp, ip, lock_flags);
+	xfs_trans_ihold(tp, ip);
+
+	inode->i_flags = flags;
+	inode->i_vflags = vflags;
+	xfs_get_inode_flags(ip);
+
+	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
+	xfs_ichgtime(ip, XFS_ICHGTIME_CHG);
+
+	XFS_STATS_INC(xs_ig_attrchg);
+
+	if (mp->m_flags & XFS_MOUNT_WSYNC)
+		xfs_trans_set_sync(tp);
+	code = xfs_trans_commit(tp, 0);
+	xfs_iunlock(ip, lock_flags);
+	return code;
+
+error_out:
+	xfs_trans_cancel(tp, 0);
+	if (lock_flags)
+		xfs_iunlock(ip, lock_flags);
+	return code;
+}
+
+
 int
 xfs_setattr(
 	struct xfs_inode	*ip,
@@ -69,6 +143,7 @@
 	uint			commit_flags=0;
 	uid_t			uid=0, iuid=0;
 	gid_t			gid=0, igid=0;
+	tag_t			tag=0, itag=0;
 	struct xfs_dquot	*udqp, *gdqp, *olddquot1, *olddquot2;
 	int			need_iolock = 1;
@@ -161,7 +236,7 @@
 	/*
 	 * Change file ownership. Must be the owner or privileged.
 	 */
-	if (mask & (ATTR_UID|ATTR_GID)) {
+	if (mask & (ATTR_UID|ATTR_GID|ATTR_TAG)) {
 		/*
 		 * These IDs could have changed since we last looked at them.
 		 * But, we're assured that if the ownership did change
 		 *
@@ -170,8 +245,10 @@
 		 */
 		iuid = ip->i_d.di_uid;
 		igid = ip->i_d.di_gid;
+		itag = ip->i_d.di_tag;
 		gid = (mask & ATTR_GID) ? iattr->ia_gid : igid;
 		uid = (mask & ATTR_UID) ? iattr->ia_uid : iuid;
+		tag = (mask & ATTR_TAG) ? iattr->ia_tag : itag;

 		/*
 		 * Do a quota reservation only if uid/gid is actually
 		 *
@@ -179,7 +256,8 @@
 		 */
 		if (XFS_IS_QUOTA_RUNNING(mp) &&
 		    ((XFS_IS_UQUOTA_ON(mp) && iuid != uid) ||
-		     (XFS_IS_GQUOTA_ON(mp) && igid != gid))) {
+		     (XFS_IS_GQUOTA_ON(mp) && igid != gid) ||
+		     (XFS_IS_GQUOTA_ON(mp) && itag != tag))) {
 			ASSERT(tp);
 			code = xfs_qm_vop_chown_reserve(tp, ip, udqp, gdqp,
 						capable(CAP_FOWNER) ?
@@ -340,7 +418,7 @@
 	/*
 	 * Change file ownership. Must be the owner or privileged.
 	 */
-	if (mask & (ATTR_UID|ATTR_GID)) {
+	if (mask & (ATTR_UID|ATTR_GID|ATTR_TAG)) {
 		/*
 		 * CAP_FSETID overrides the following restrictions:
 		 *
@@ -356,6 +434,10 @@
 		 * Change the ownerships and register quota modifications
 		 * in the transaction.
 		 */
+		if (itag != tag) {
+			ip->i_d.di_tag = tag;
+			inode->i_tag = tag;
+		}
 		if (iuid != uid) {
 			if (XFS_IS_QUOTA_RUNNING(mp) && XFS_IS_UQUOTA_ON(mp)) {
 				ASSERT(mask & ATTR_UID);
diff -Nur kernel-2.6.32.54/linux-2.6.32/fs/xfs/xfs_vnodeops.h kernel-2.6.32.54.vs/linux-2.6.32/fs/xfs/xfs_vnodeops.h
--- kernel-2.6.32.54/linux-2.6.32/fs/xfs/xfs_vnodeops.h	2012-01-16 15:01:39.764725958 +0100
+++ kernel-2.6.32.54.vs/linux-2.6.32/fs/xfs/xfs_vnodeops.h	2012-01-16 14:51:21.917408701 +0100
@@ -14,6 +14,7 @@
 struct xfs_iomap;

+int xfs_sync_flags(struct inode *, int, int);
 int xfs_setattr(struct xfs_inode *ip, struct iattr *vap, int flags);
 #define	XFS_ATTR_DMI		0x01	/* invocation from a DMI function */
 #define	XFS_ATTR_NONBLOCK	0x02	/* return EAGAIN if operation would block */
diff -Nur kernel-2.6.32.54/linux-2.6.32/include/asm-generic/tlb.h kernel-2.6.32.54.vs/linux-2.6.32/include/asm-generic/tlb.h
--- kernel-2.6.32.54/linux-2.6.32/include/asm-generic/tlb.h	2009-12-03 04:51:21.000000000 +0100
+++ kernel-2.6.32.54.vs/linux-2.6.32/include/asm-generic/tlb.h	2012-01-16 14:51:21.917408701 +0100
@@ -14,6 +14,7 @@
 #define _ASM_GENERIC__TLB_H

 #include
+#include
 #include
 #include
diff -Nur kernel-2.6.32.54/linux-2.6.32/include/linux/capability.h kernel-2.6.32.54.vs/linux-2.6.32/include/linux/capability.h
--- kernel-2.6.32.54/linux-2.6.32/include/linux/capability.h	2009-12-03 04:51:21.000000000 +0100
+++ kernel-2.6.32.54.vs/linux-2.6.32/include/linux/capability.h	2012-01-16 14:51:21.917408701 +0100
@@ -285,6 +285,7 @@
    arbitrary SCSI commands */
 /* Allow setting encryption key on loopback filesystem */
 /* Allow setting zone reclaim policy */
+/* Allow the selection of a security context */

 #define CAP_SYS_ADMIN        21
@@ -357,7 +358,13 @@

 #define CAP_MAC_ADMIN        33

-#define CAP_LAST_CAP         CAP_MAC_ADMIN
+/* Allow context manipulations */
+/* Allow changing context info on files */
+
+#define CAP_CONTEXT	     34
+
+
+#define CAP_LAST_CAP         CAP_CONTEXT

 #define cap_valid(x) ((x) >= 0 && (x) <= CAP_LAST_CAP)
diff -Nur kernel-2.6.32.54/linux-2.6.32/include/linux/devpts_fs.h kernel-2.6.32.54.vs/linux-2.6.32/include/linux/devpts_fs.h
--- kernel-2.6.32.54/linux-2.6.32/include/linux/devpts_fs.h	2009-12-03 04:51:21.000000000 +0100
+++ kernel-2.6.32.54.vs/linux-2.6.32/include/linux/devpts_fs.h	2012-01-16 14:51:21.921408687 +0100
@@ -45,5 +45,4 @@

 #endif

-
 #endif /* _LINUX_DEVPTS_FS_H */
diff -Nur kernel-2.6.32.54/linux-2.6.32/include/linux/ext2_fs.h kernel-2.6.32.54.vs/linux-2.6.32/include/linux/ext2_fs.h
--- kernel-2.6.32.54/linux-2.6.32/include/linux/ext2_fs.h	2012-01-16 15:01:39.852725646 +0100
+++ kernel-2.6.32.54.vs/linux-2.6.32/include/linux/ext2_fs.h	2012-01-16 14:51:21.921408687 +0100
@@ -189,8 +189,12 @@
 #define EXT2_NOTAIL_FL			FS_NOTAIL_FL	/* file tail should not be merged */
 #define EXT2_DIRSYNC_FL			FS_DIRSYNC_FL	/* dirsync behaviour (directories only) */
 #define EXT2_TOPDIR_FL			FS_TOPDIR_FL	/* Top of directory hierarchies*/
+#define EXT2_IXUNLINK_FL		FS_IXUNLINK_FL	/* Immutable invert on unlink */
 #define EXT2_RESERVED_FL		FS_RESERVED_FL	/* reserved for ext2 lib */

+#define EXT2_BARRIER_FL			FS_BARRIER_FL	/* Barrier for chroot() */
+#define EXT2_COW_FL			FS_COW_FL	/* Copy on Write marker */
+
 #define EXT2_FL_USER_VISIBLE		FS_FL_USER_VISIBLE	/* User visible flags */
 #define EXT2_FL_USER_MODIFIABLE	FS_FL_USER_MODIFIABLE	/* User modifiable flags */
@@ -274,7 +278,8 @@
 		__u16	i_pad1;
 		__le16	l_i_uid_high;	/* these 2 fields    */
 		__le16	l_i_gid_high;	/* were reserved2[0] */
-		__u32	l_i_reserved2;
+		__le16	l_i_tag;	/* Context Tag */
+		__u16	l_i_reserved2;
 	} linux2;
 	struct {
 		__u8	h_i_frag;	/* Fragment number */
@@ -303,6 +308,7 @@
 #define i_gid_low	i_gid
 #define i_uid_high	osd2.linux2.l_i_uid_high
 #define i_gid_high	osd2.linux2.l_i_gid_high
+#define i_raw_tag	osd2.linux2.l_i_tag
 #define i_reserved2	osd2.linux2.l_i_reserved2
 #endif
@@ -347,6 +353,7 @@
 #define EXT2_MOUNT_USRQUOTA		0x020000  /* user quota */
 #define EXT2_MOUNT_GRPQUOTA		0x040000  /* group quota */
 #define EXT2_MOUNT_RESERVATION		0x080000  /* Preallocation */
+#define EXT2_MOUNT_TAGGED		(1<<24)	  /* Enable Context Tags */

 #define clear_opt(o, opt)		o &= ~EXT2_MOUNT_##opt
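/*
 * [Editor's illustration, not part of the patch.] With EXT2_MOUNT_TAGGED
 * set, the ext2 inode read/write paths (fs/ext2, not shown here) use the
 * new on-disk l_i_tag word next to the split uid/gid halves. The
 * recombination would look roughly like this; the exact packing depends on
 * the configured tagging scheme, so treat the ex_* helpers as hypothetical.
 */
#include <stdint.h>

struct ex_raw_ids {
	uint16_t uid_low;	/* i_uid (low 16 bits) */
	uint16_t uid_high;	/* osd2.linux2.l_i_uid_high */
	uint16_t tag;		/* osd2.linux2.l_i_tag (i_raw_tag) */
};

static inline uint32_t ex_inode_uid(const struct ex_raw_ids *r)
{
	/* halves are assumed already converted from little endian */
	return ((uint32_t)r->uid_high << 16) | r->uid_low;
}

static inline uint16_t ex_inode_tag(const struct ex_raw_ids *r, int tagged)
{
	/* an untagged mount ignores whatever tag is stored on disk */
	return tagged ? r->tag : 0;
}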
diff -Nur kernel-2.6.32.54/linux-2.6.32/include/linux/ext3_fs.h kernel-2.6.32.54.vs/linux-2.6.32/include/linux/ext3_fs.h
--- kernel-2.6.32.54/linux-2.6.32/include/linux/ext3_fs.h	2012-01-16 15:01:39.852725646 +0100
+++ kernel-2.6.32.54.vs/linux-2.6.32/include/linux/ext3_fs.h	2012-01-16 14:51:21.921408687 +0100
@@ -173,10 +173,14 @@
 #define EXT3_NOTAIL_FL			0x00008000 /* file tail should not be merged */
 #define EXT3_DIRSYNC_FL			0x00010000 /* dirsync behaviour (directories only) */
 #define EXT3_TOPDIR_FL			0x00020000 /* Top of directory hierarchies*/
+#define EXT3_IXUNLINK_FL		0x08000000 /* Immutable invert on unlink */
 #define EXT3_RESERVED_FL		0x80000000 /* reserved for ext3 lib */

-#define EXT3_FL_USER_VISIBLE		0x0003DFFF /* User visible flags */
-#define EXT3_FL_USER_MODIFIABLE	0x000380FF /* User modifiable flags */
+#define EXT3_BARRIER_FL			0x04000000 /* Barrier for chroot() */
+#define EXT3_COW_FL			0x20000000 /* Copy on Write marker */
+
+#define EXT3_FL_USER_VISIBLE		0x0103DFFF /* User visible flags */
+#define EXT3_FL_USER_MODIFIABLE	0x010380FF /* User modifiable flags */

 /* Flags that should be inherited by new inodes from their parent. */
 #define EXT3_FL_INHERITED		(EXT3_SECRM_FL | EXT3_UNRM_FL | EXT3_COMPR_FL |\
@@ -320,7 +324,8 @@
 		__u16	i_pad1;
 		__le16	l_i_uid_high;	/* these 2 fields    */
 		__le16	l_i_gid_high;	/* were reserved2[0] */
-		__u32	l_i_reserved2;
+		__le16	l_i_tag;	/* Context Tag */
+		__u16	l_i_reserved2;
 	} linux2;
 	struct {
 		__u8	h_i_frag;	/* Fragment number */
@@ -351,6 +356,7 @@
 #define i_gid_low	i_gid
 #define i_uid_high	osd2.linux2.l_i_uid_high
 #define i_gid_high	osd2.linux2.l_i_gid_high
+#define i_raw_tag	osd2.linux2.l_i_tag
 #define i_reserved2	osd2.linux2.l_i_reserved2

 #elif defined(__GNU__)
@@ -414,6 +420,7 @@
 #define EXT3_MOUNT_GRPQUOTA		0x200000 /* "old" group quota */
 #define EXT3_MOUNT_DATA_ERR_ABORT	0x400000 /* Abort on file data write
						  * error in ordered mode */
+#define EXT3_MOUNT_TAGGED		(1<<24)	 /* Enable Context Tags */

 /* Compatibility, for having both ext2_fs.h and ext3_fs.h included at once */
 #ifndef _LINUX_EXT2_FS_H
@@ -892,6 +899,7 @@
 extern void ext3_set_aops(struct inode *inode);
 extern int ext3_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		       u64 start, u64 len);
+extern int ext3_sync_flags(struct inode *, int, int);

 /* ioctl.c */
 extern long ext3_ioctl(struct file *, unsigned int, unsigned long);
diff -Nur kernel-2.6.32.54/linux-2.6.32/include/linux/fs.h kernel-2.6.32.54.vs/linux-2.6.32/include/linux/fs.h
--- kernel-2.6.32.54/linux-2.6.32/include/linux/fs.h	2012-01-16 15:01:40.488723395 +0100
+++ kernel-2.6.32.54.vs/linux-2.6.32/include/linux/fs.h	2012-01-16 14:51:21.925408673 +0100
@@ -209,6 +209,9 @@
 #define MS_KERNMOUNT	(1<<22) /* this is a kern_mount call */
 #define MS_I_VERSION	(1<<23) /* Update inode I_version field */
 #define MS_STRICTATIME	(1<<24) /* Always perform atime updates */
+#define MS_TAGGED	(1<<25) /* use generic inode tagging */
+#define MS_TAGID	(1<<26) /* use specific tag for this mount */
+#define MS_NOTAGCHECK	(1<<27) /* don't check tags */
 #define MS_ACTIVE	(1<<30)
 #define MS_NOUSER	(1<<31)
@@ -241,6 +244,14 @@
 #define S_NOCMTIME	128	/* Do not update file c/mtime */
 #define S_SWAPFILE	256	/* Do not truncate: swapon got its bmaps */
 #define S_PRIVATE	512	/* Inode is fs-internal */
+#define S_IXUNLINK	1024	/* Immutable Invert on unlink */
+
+/* Linux-VServer related Inode flags */
+
+#define V_VALID		1
+#define V_XATTR		2
+#define V_BARRIER	4	/* Barrier for chroot() */
+#define V_COW		8	/* Copy on Write */

 /*
  * Note that nosuid etc flags are inode-specific: setting some file-system
@@ -263,12 +274,15 @@
 #define IS_DIRSYNC(inode)	(__IS_FLG(inode, MS_SYNCHRONOUS|MS_DIRSYNC) || \
					((inode)->i_flags & (S_SYNC|S_DIRSYNC)))
 #define IS_MANDLOCK(inode)	__IS_FLG(inode, MS_MANDLOCK)
-#define IS_NOATIME(inode)   __IS_FLG(inode, MS_RDONLY|MS_NOATIME)
-#define IS_I_VERSION(inode)   __IS_FLG(inode, MS_I_VERSION)
+#define IS_NOATIME(inode)	__IS_FLG(inode, MS_RDONLY|MS_NOATIME)
+#define IS_I_VERSION(inode)	__IS_FLG(inode, MS_I_VERSION)
+#define IS_TAGGED(inode)	__IS_FLG(inode, MS_TAGGED)

 #define IS_NOQUOTA(inode)	((inode)->i_flags & S_NOQUOTA)
 #define IS_APPEND(inode)	((inode)->i_flags & S_APPEND)
 #define IS_IMMUTABLE(inode)	((inode)->i_flags & S_IMMUTABLE)
+#define IS_IXUNLINK(inode)	((inode)->i_flags & S_IXUNLINK)
+#define IS_IXORUNLINK(inode)	((IS_IXUNLINK(inode) ? S_IMMUTABLE : 0) ^ IS_IMMUTABLE(inode))
 #define IS_POSIXACL(inode)	__IS_FLG(inode, MS_POSIXACL)

 #define IS_DEADDIR(inode)	((inode)->i_flags & S_DEAD)
@@ -276,6 +290,16 @@
 #define IS_SWAPFILE(inode)	((inode)->i_flags & S_SWAPFILE)
 #define IS_PRIVATE(inode)	((inode)->i_flags & S_PRIVATE)

+#define IS_BARRIER(inode)	(S_ISDIR((inode)->i_mode) && ((inode)->i_vflags & V_BARRIER))
+
+#ifdef CONFIG_VSERVER_COWBL
+#  define IS_COW(inode)		(IS_IXUNLINK(inode) && IS_IMMUTABLE(inode))
+#  define IS_COW_LINK(inode)	(S_ISREG((inode)->i_mode) && ((inode)->i_nlink > 1))
+#else
+#  define IS_COW(inode)		(0)
+#  define IS_COW_LINK(inode)	(0)
+#endif
+
 /* the read-only stuff doesn't really belong here, but any other place is
    probably as bad and I don't want to create yet another include file. */
@@ -357,11 +381,14 @@
 #define FS_TOPDIR_FL			0x00020000 /* Top of directory hierarchies*/
 #define FS_EXTENT_FL			0x00080000 /* Extents */
 #define FS_DIRECTIO_FL			0x00100000 /* Use direct i/o */
+#define FS_IXUNLINK_FL			0x08000000 /* Immutable invert on unlink */
 #define FS_RESERVED_FL			0x80000000 /* reserved for ext2 lib */

-#define FS_FL_USER_VISIBLE		0x0003DFFF /* User visible flags */
-#define FS_FL_USER_MODIFIABLE		0x000380FF /* User modifiable flags */
+#define FS_BARRIER_FL			0x04000000 /* Barrier for chroot() */
+#define FS_COW_FL			0x20000000 /* Copy on Write marker */

+#define FS_FL_USER_VISIBLE		0x0103DFFF /* User visible flags */
+#define FS_FL_USER_MODIFIABLE		0x010380FF /* User modifiable flags */

 #define SYNC_FILE_RANGE_WAIT_BEFORE	1
 #define SYNC_FILE_RANGE_WRITE		2
@@ -444,6 +471,7 @@
 #define ATTR_KILL_PRIV	(1 << 14)
 #define ATTR_OPEN	(1 << 15) /* Truncating from open(O_TRUNC) */
 #define ATTR_TIMES_SET	(1 << 16)
+#define ATTR_TAG	(1 << 17)

 /*
  * This is the Inode Attributes structure, used for notify_change().  It
@@ -459,6 +487,7 @@
 	umode_t		ia_mode;
 	uid_t		ia_uid;
 	gid_t		ia_gid;
+	tag_t		ia_tag;
 	loff_t		ia_size;
 	struct timespec	ia_atime;
 	struct timespec	ia_mtime;
@@ -472,6 +501,9 @@
 	struct file	*ia_file;
 };

+#define ATTR_FLAG_BARRIER	512	/* Barrier for chroot() */
+#define ATTR_FLAG_IXUNLINK	1024	/* Immutable invert on unlink */
+
 /*
  * Includes for diskquotas.
 */
@@ -738,7 +770,9 @@
 	unsigned int		i_nlink;
 	uid_t			i_uid;
 	gid_t			i_gid;
+	tag_t			i_tag;
 	dev_t			i_rdev;
+	dev_t			i_mdev;
 	u64			i_version;
 	loff_t			i_size;
 #ifdef __NEED_I_SIZE_ORDERED
@@ -785,7 +819,8 @@
 	unsigned long		i_state;
 	unsigned long		dirtied_when;	/* jiffies of first dirtying */

-	unsigned int		i_flags;
+	unsigned short		i_flags;
+	unsigned short		i_vflags;

 	atomic_t		i_writecount;
 #ifdef CONFIG_SECURITY
@@ -873,12 +908,12 @@

 static inline unsigned iminor(const struct inode *inode)
 {
-	return MINOR(inode->i_rdev);
+	return MINOR(inode->i_mdev);
 }

 static inline unsigned imajor(const struct inode *inode)
 {
-	return MAJOR(inode->i_rdev);
+	return MAJOR(inode->i_mdev);
 }

 extern struct block_device *I_BDEV(struct inode *inode);
@@ -937,6 +972,7 @@
 	loff_t			f_pos;
 	struct fown_struct	f_owner;
 	const struct cred	*f_cred;
+	xid_t			f_xid;
 	struct file_ra_state	f_ra;

 	u64			f_version;
@@ -1078,6 +1114,7 @@
 	struct file *fl_file;
 	loff_t fl_start;
 	loff_t fl_end;
+	xid_t fl_xid;

 	struct fasync_struct *	fl_fasync; /* for lease break notifications */
 	unsigned long fl_break_time;	/* for nonblocking lease breaks */
@@ -1548,6 +1585,7 @@
 	ssize_t (*getxattr) (struct dentry *, const char *, void *, size_t);
 	ssize_t (*listxattr) (struct dentry *, char *, size_t);
 	int (*removexattr) (struct dentry *, const char *);
+	int (*sync_flags) (struct inode *, int, int);
 	void (*truncate_range)(struct inode *, loff_t, loff_t);
 	long (*fallocate)(struct inode *inode, int mode, loff_t offset,
			  loff_t len);
@@ -1568,6 +1606,7 @@
		unsigned long, loff_t *);
 extern ssize_t vfs_writev(struct file *, const struct iovec __user *,
		unsigned long, loff_t *);
+ssize_t vfs_sendfile(struct file *, struct file *, loff_t *, size_t, loff_t);

 struct super_operations {
   	struct inode *(*alloc_inode)(struct super_block *sb);
@@ -2376,6 +2415,7 @@
 extern int dcache_dir_close(struct inode *, struct file *);
 extern loff_t dcache_dir_lseek(struct file *, loff_t, int);
 extern int dcache_readdir(struct file *, void *, filldir_t);
+extern int dcache_readdir_filter(struct file *, void *, filldir_t, int (*)(struct dentry *));
 extern int simple_getattr(struct vfsmount *, struct dentry *, struct kstat *);
 extern int simple_statfs(struct dentry *, struct kstatfs *);
 extern int simple_link(struct dentry *, struct inode *, struct dentry *);
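/*
 * [Editor's illustration, not part of the patch.] The ->sync_flags() hook
 * added to struct inode_operations above gives filesystems a way to persist
 * the in-core i_flags/i_vflags pair (ext3_sync_flags, reiserfs_sync_flags
 * and xfs_sync_flags in this patch). A caller-side wrapper would plausibly
 * look like this; ex_vfs_sync_flags is a hypothetical name.
 */
static inline int ex_vfs_sync_flags(struct inode *inode, int flags, int vflags)
{
	/* filesystems without the hook only keep the in-core values */
	if (!inode->i_op || !inode->i_op->sync_flags) {
		inode->i_flags = flags;
		inode->i_vflags = vflags;
		return 0;
	}
	return inode->i_op->sync_flags(inode, flags, vflags);
}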
diff -Nur kernel-2.6.32.54/linux-2.6.32/include/linux/gfs2_ondisk.h kernel-2.6.32.54.vs/linux-2.6.32/include/linux/gfs2_ondisk.h
--- kernel-2.6.32.54/linux-2.6.32/include/linux/gfs2_ondisk.h	2009-12-03 04:51:21.000000000 +0100
+++ kernel-2.6.32.54.vs/linux-2.6.32/include/linux/gfs2_ondisk.h	2012-01-16 14:51:21.925408673 +0100
@@ -235,6 +235,9 @@
	gfs2fl_NoAtime		= 7,
	gfs2fl_Sync		= 8,
	gfs2fl_System		= 9,
+	gfs2fl_IXUnlink		= 16,
+	gfs2fl_Barrier		= 17,
+	gfs2fl_Cow		= 18,
	gfs2fl_TruncInProg	= 29,
	gfs2fl_InheritDirectio	= 30,
	gfs2fl_InheritJdata	= 31,
@@ -251,6 +254,9 @@
 #define GFS2_DIF_NOATIME		0x00000080
 #define GFS2_DIF_SYNC			0x00000100
 #define GFS2_DIF_SYSTEM			0x00000200 /* New in gfs2 */
+#define GFS2_DIF_IXUNLINK		0x00010000
+#define GFS2_DIF_BARRIER		0x00020000
+#define GFS2_DIF_COW			0x00040000
 #define GFS2_DIF_TRUNC_IN_PROG		0x20000000 /* New in gfs2 */
 #define GFS2_DIF_INHERIT_DIRECTIO	0x40000000
 #define GFS2_DIF_INHERIT_JDATA		0x80000000
diff -Nur kernel-2.6.32.54/linux-2.6.32/include/linux/if_tun.h kernel-2.6.32.54.vs/linux-2.6.32/include/linux/if_tun.h
--- kernel-2.6.32.54/linux-2.6.32/include/linux/if_tun.h	2009-12-03 04:51:21.000000000 +0100
+++ kernel-2.6.32.54.vs/linux-2.6.32/include/linux/if_tun.h	2012-01-16 14:51:21.925408673 +0100
@@ -48,6 +48,7 @@
 #define TUNGETIFF      _IOR('T', 210, unsigned int)
 #define TUNGETSNDBUF   _IOR('T', 211, int)
 #define TUNSETSNDBUF   _IOW('T', 212, int)
+#define TUNSETNID     _IOW('T', 215, int)

 /* TUNSETIFF ifr flags */
 #define IFF_TUN		0x0001
diff -Nur kernel-2.6.32.54/linux-2.6.32/include/linux/init_task.h kernel-2.6.32.54.vs/linux-2.6.32/include/linux/init_task.h
--- kernel-2.6.32.54/linux-2.6.32/include/linux/init_task.h	2009-12-03 04:51:21.000000000 +0100
+++ kernel-2.6.32.54.vs/linux-2.6.32/include/linux/init_task.h	2012-01-16 14:51:21.925408673 +0100
@@ -184,6 +184,10 @@
	INIT_FTRACE_GRAPH						\
	INIT_TRACE_RECURSION						\
	INIT_TASK_RCU_PREEMPT(tsk)					\
+	.xid		= 0,						\
+	.vx_info	= NULL,						\
+	.nid		= 0,						\
+	.nx_info	= NULL,						\
 }
diff -Nur kernel-2.6.32.54/linux-2.6.32/include/linux/ipc.h kernel-2.6.32.54.vs/linux-2.6.32/include/linux/ipc.h
--- kernel-2.6.32.54/linux-2.6.32/include/linux/ipc.h	2009-12-03 04:51:21.000000000 +0100
+++ kernel-2.6.32.54.vs/linux-2.6.32/include/linux/ipc.h	2012-01-16 14:51:21.925408673 +0100
@@ -91,6 +91,7 @@
	key_t		key;
	uid_t		uid;
	gid_t		gid;
+	xid_t		xid;
	uid_t		cuid;
	gid_t		cgid;
	mode_t		mode;
diff -Nur kernel-2.6.32.54/linux-2.6.32/include/linux/Kbuild kernel-2.6.32.54.vs/linux-2.6.32/include/linux/Kbuild
--- kernel-2.6.32.54/linux-2.6.32/include/linux/Kbuild	2012-01-16 15:01:40.272724159 +0100
+++ kernel-2.6.32.54.vs/linux-2.6.32/include/linux/Kbuild	2012-01-16 14:51:21.917408701 +0100
@@ -383,5 +383,8 @@
 unifdef-y += xfrm.h

 objhdr-y += version.h
+
+header-y += vserver/
 header-y += wimax.h
 header-y += wimax/
+
diff -Nur kernel-2.6.32.54/linux-2.6.32/include/linux/loop.h kernel-2.6.32.54.vs/linux-2.6.32/include/linux/loop.h
--- kernel-2.6.32.54/linux-2.6.32/include/linux/loop.h	2009-12-03 04:51:21.000000000 +0100
+++ kernel-2.6.32.54.vs/linux-2.6.32/include/linux/loop.h	2012-01-16 14:51:21.925408673 +0100
@@ -45,6 +45,7 @@
	struct loop_func_table *lo_encryption;
	__u32		lo_init[2];
	uid_t		lo_key_owner;	/* Who set the key */
+	xid_t		lo_xid;
	int		(*ioctl)(struct loop_device *, int cmd,
				 unsigned long arg);
diff -Nur kernel-2.6.32.54/linux-2.6.32/include/linux/magic.h kernel-2.6.32.54.vs/linux-2.6.32/include/linux/magic.h
--- kernel-2.6.32.54/linux-2.6.32/include/linux/magic.h	2009-12-03 04:51:21.000000000 +0100
+++ kernel-2.6.32.54.vs/linux-2.6.32/include/linux/magic.h	2012-01-16 14:51:21.925408673 +0100
@@ -3,7 +3,7 @@

 #define ADFS_SUPER_MAGIC	0xadf5
 #define AFFS_SUPER_MAGIC	0xadff
-#define AFS_SUPER_MAGIC 0x5346414F
+#define AFS_SUPER_MAGIC		0x5346414F
 #define AUTOFS_SUPER_MAGIC	0x0187
 #define CODA_SUPER_MAGIC	0x73757245
 #define CRAMFS_MAGIC		0x28cd3d45	/* some random number */
@@ -38,6 +38,7 @@
 #define NFS_SUPER_MAGIC		0x6969
 #define OPENPROM_SUPER_MAGIC	0x9fa1
 #define PROC_SUPER_MAGIC	0x9fa0
+#define DEVPTS_SUPER_MAGIC	0x1cd1
 #define QNX4_SUPER_MAGIC	0x002f		/* qnx4 fs detection */

 #define REISERFS_SUPER_MAGIC	0x52654973	/* used by gcc */
diff -Nur kernel-2.6.32.54/linux-2.6.32/include/linux/major.h kernel-2.6.32.54.vs/linux-2.6.32/include/linux/major.h
--- kernel-2.6.32.54/linux-2.6.32/include/linux/major.h	2009-12-03 04:51:21.000000000 +0100
+++ kernel-2.6.32.54.vs/linux-2.6.32/include/linux/major.h	2012-01-16 14:51:21.925408673 +0100
@@ -15,6 +15,7 @@
 #define HD_MAJOR		IDE0_MAJOR
 #define PTY_SLAVE_MAJOR		3
 #define TTY_MAJOR		4
+#define VROOT_MAJOR		4
 #define TTYAUX_MAJOR		5
 #define LP_MAJOR		6
 #define VCS_MAJOR		7
diff -Nur kernel-2.6.32.54/linux-2.6.32/include/linux/memcontrol.h kernel-2.6.32.54.vs/linux-2.6.32/include/linux/memcontrol.h
--- kernel-2.6.32.54/linux-2.6.32/include/linux/memcontrol.h	2009-12-03 04:51:21.000000000 +0100
+++ kernel-2.6.32.54.vs/linux-2.6.32/include/linux/memcontrol.h	2012-01-16 14:51:21.929408659 +0100
@@ -70,6 +70,13 @@
 extern struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);

+extern u64 mem_cgroup_res_read_u64(struct mem_cgroup *mem, int member);
+extern u64 mem_cgroup_memsw_read_u64(struct mem_cgroup *mem, int member);
+
+extern s64 mem_cgroup_stat_read_cache(struct mem_cgroup *mem);
+extern s64 mem_cgroup_stat_read_anon(struct mem_cgroup *mem);
+extern s64 mem_cgroup_stat_read_mapped(struct mem_cgroup *mem);
+
 static inline
 int mm_match_cgroup(const struct mm_struct *mm, const struct mem_cgroup *cgroup)
 {
diff -Nur kernel-2.6.32.54/linux-2.6.32/include/linux/mm_types.h kernel-2.6.32.54.vs/linux-2.6.32/include/linux/mm_types.h
--- kernel-2.6.32.54/linux-2.6.32/include/linux/mm_types.h	2012-01-16 15:01:39.864725604 +0100
+++ kernel-2.6.32.54.vs/linux-2.6.32/include/linux/mm_types.h	2012-01-16 14:51:21.941408617 +0100
@@ -246,6 +246,7 @@

	/* Architecture-specific MM context */
	mm_context_t context;
+	struct vx_info *mm_vx_info;

	/* Swap token stuff */
	/*
diff -Nur kernel-2.6.32.54/linux-2.6.32/include/linux/mount.h kernel-2.6.32.54.vs/linux-2.6.32/include/linux/mount.h
--- kernel-2.6.32.54/linux-2.6.32/include/linux/mount.h	2009-12-03 04:51:21.000000000 +0100
+++ kernel-2.6.32.54.vs/linux-2.6.32/include/linux/mount.h	2012-01-16 14:51:21.941408617 +0100
@@ -36,6 +36,9 @@
 #define MNT_UNBINDABLE	0x2000	/* if the vfsmount is a unbindable mount */
 #define MNT_PNODE_MASK	0x3000	/* propagation flag mask */

+#define MNT_TAGID	0x10000
+#define MNT_NOTAG	0x20000
+
 struct vfsmount {
	struct list_head mnt_hash;
	struct vfsmount *mnt_parent;	/* fs we are mounted on */
@@ -70,6 +73,7 @@
 #else
	int mnt_writers;
 #endif
+	tag_t mnt_tag;			/* tagging used for vfsmount */
 };

 static inline int *get_mnt_writers_ptr(struct vfsmount *mnt)
diff -Nur kernel-2.6.32.54/linux-2.6.32/include/linux/net.h kernel-2.6.32.54.vs/linux-2.6.32/include/linux/net.h
--- kernel-2.6.32.54/linux-2.6.32/include/linux/net.h	2009-12-03 04:51:21.000000000 +0100
+++ kernel-2.6.32.54.vs/linux-2.6.32/include/linux/net.h	2012-01-16 14:51:21.941408617 +0100
@@ -69,6 +69,7 @@
 #define SOCK_NOSPACE		2
 #define SOCK_PASSCRED		3
 #define SOCK_PASSSEC		4
+#define SOCK_USER_SOCKET	5

 #ifndef ARCH_HAS_SOCKET_TYPES
 /**
diff -Nur kernel-2.6.32.54/linux-2.6.32/include/linux/nfs_mount.h kernel-2.6.32.54.vs/linux-2.6.32/include/linux/nfs_mount.h
--- kernel-2.6.32.54/linux-2.6.32/include/linux/nfs_mount.h	2009-12-03 04:51:21.000000000 +0100
+++ kernel-2.6.32.54.vs/linux-2.6.32/include/linux/nfs_mount.h	2012-01-16 14:51:21.941408617 +0100
@@ -63,7 +63,8 @@
 #define NFS_MOUNT_SECFLAVOUR	0x2000	/* 5 */
 #define NFS_MOUNT_NORDIRPLUS	0x4000	/* 5 */
 #define NFS_MOUNT_UNSHARED	0x8000	/* 5 */
-#define NFS_MOUNT_FLAGMASK	0xFFFF
+#define NFS_MOUNT_TAGGED	0x10000	/* context tagging */
+#define NFS_MOUNT_FLAGMASK	0x1FFFF

 /* The following are for internal use only */
 #define NFS_MOUNT_LOOKUP_CACHE_NONEG	0x10000
diff -Nur kernel-2.6.32.54/linux-2.6.32/include/linux/nsproxy.h kernel-2.6.32.54.vs/linux-2.6.32/include/linux/nsproxy.h
--- kernel-2.6.32.54/linux-2.6.32/include/linux/nsproxy.h	2009-12-03 04:51:21.000000000 +0100
+++ kernel-2.6.32.54.vs/linux-2.6.32/include/linux/nsproxy.h	2012-01-16 14:51:21.941408617 +0100
@@ -3,6 +3,7 @@

 #include
 #include
+#include

 struct mnt_namespace;
 struct uts_namespace;
@@ -63,22 +64,33 @@
 }

 int copy_namespaces(unsigned long flags, struct task_struct *tsk);
+struct nsproxy *copy_nsproxy(struct nsproxy *orig);
 void exit_task_namespaces(struct task_struct *tsk);
 void switch_task_namespaces(struct task_struct *tsk, struct nsproxy *new);
 void free_nsproxy(struct nsproxy *ns);
 int unshare_nsproxy_namespaces(unsigned long, struct nsproxy **,
	struct fs_struct *);

-static inline void put_nsproxy(struct nsproxy *ns)
+#define	get_nsproxy(n)	__get_nsproxy(n, __FILE__, __LINE__)
+
+static inline void __get_nsproxy(struct nsproxy *ns,
	const char *_file, int _line)
 {
-	if (atomic_dec_and_test(&ns->count)) {
-		free_nsproxy(ns);
-	}
+	vxlprintk(VXD_CBIT(space, 0), "get_nsproxy(%p[%u])",
		ns, atomic_read(&ns->count), _file, _line);
+	atomic_inc(&ns->count);
 }

-static inline void get_nsproxy(struct nsproxy *ns)
+#define	put_nsproxy(n)	__put_nsproxy(n, __FILE__, __LINE__)
+
+static inline void __put_nsproxy(struct nsproxy *ns,
	const char *_file, int _line)
 {
-	atomic_inc(&ns->count);
+	vxlprintk(VXD_CBIT(space, 0), "put_nsproxy(%p[%u])",
		ns, atomic_read(&ns->count), _file, _line);
+	if (atomic_dec_and_test(&ns->count)) {
+		free_nsproxy(ns);
+	}
 }

 #ifdef CONFIG_CGROUP_NS
diff -Nur kernel-2.6.32.54/linux-2.6.32/include/linux/pid.h kernel-2.6.32.54.vs/linux-2.6.32/include/linux/pid.h
--- kernel-2.6.32.54/linux-2.6.32/include/linux/pid.h	2012-01-16 15:01:39.864725604 +0100
+++ kernel-2.6.32.54.vs/linux-2.6.32/include/linux/pid.h	2012-01-16 14:51:21.945408603 +0100
@@ -8,7 +8,8 @@
	PIDTYPE_PID,
	PIDTYPE_PGID,
	PIDTYPE_SID,
-	PIDTYPE_MAX
+	PIDTYPE_MAX,
+	PIDTYPE_REALPID
 };

 /*
@@ -160,6 +161,7 @@
 }

 pid_t pid_nr_ns(struct pid *pid, struct pid_namespace *ns);
+pid_t pid_unmapped_nr_ns(struct pid *pid, struct pid_namespace *ns);
 pid_t pid_vnr(struct pid *pid);

 #define do_each_pid_task(pid, type, task)				\
diff -Nur kernel-2.6.32.54/linux-2.6.32/include/linux/proc_fs.h kernel-2.6.32.54.vs/linux-2.6.32/include/linux/proc_fs.h
--- kernel-2.6.32.54/linux-2.6.32/include/linux/proc_fs.h	2009-12-03 04:51:21.000000000 +0100
+++ kernel-2.6.32.54.vs/linux-2.6.32/include/linux/proc_fs.h	2012-01-16 14:51:21.945408603 +0100
@@ -56,6 +56,7 @@
	nlink_t nlink;
	uid_t uid;
	gid_t gid;
+	int vx_flags;
	loff_t size;
	const struct inode_operations *proc_iops;
	/*
@@ -250,12 +251,18 @@
 extern void kclist_add(struct kcore_list *, void *, size_t, int type);
 #endif

+struct vx_info;
+struct nx_info;
+
 union proc_op {
	int (*proc_get_link)(struct inode *, struct path *);
	int (*proc_read)(struct task_struct *task, char *page);
	int (*proc_show)(struct seq_file *m,
		struct pid_namespace *ns, struct pid *pid,
		struct task_struct *task);
+	int (*proc_vs_read)(char *page);
+	int (*proc_vxi_read)(struct vx_info *vxi, char *page);
+	int (*proc_nxi_read)(struct nx_info *nxi, char *page);
 };

 struct ctl_table_header;
@@ -263,6 +270,7 @@

 struct proc_inode {
	struct pid *pid;
+	int vx_flags;
	int fd;
	union proc_op op;
	struct proc_dir_entry *pde;
diff -Nur kernel-2.6.32.54/linux-2.6.32/include/linux/quotaops.h kernel-2.6.32.54.vs/linux-2.6.32/include/linux/quotaops.h
--- kernel-2.6.32.54/linux-2.6.32/include/linux/quotaops.h	2012-01-16 15:01:39.864725604 +0100
+++ kernel-2.6.32.54.vs/linux-2.6.32/include/linux/quotaops.h	2012-01-16 14:51:21.945408603 +0100
@@ -8,6 +8,7 @@
 #define _LINUX_QUOTAOPS_

 #include
+#include

 static inline struct quota_info *sb_dqopt(struct super_block *sb)
 {
@@ -157,10 +158,14 @@
 * a transaction (deadlocks possible otherwise) */
 static inline int vfs_dq_prealloc_space_nodirty(struct inode *inode, qsize_t nr)
 {
+	if (dl_alloc_space(inode, nr))
+		return 1;
	if (sb_any_quota_active(inode->i_sb)) {
		/* Used space is updated in alloc_space() */
-		if (inode->i_sb->dq_op->alloc_space(inode, nr, 1) == NO_QUOTA)
+		if (inode->i_sb->dq_op->alloc_space(inode, nr, 1) == NO_QUOTA) {
+			dl_free_space(inode, nr);
			return 1;
+		}
	} else
		inode_add_bytes(inode, nr);
@@ -177,10 +182,14 @@

 static inline int vfs_dq_alloc_space_nodirty(struct inode *inode, qsize_t nr)
 {
+	if (dl_alloc_space(inode, nr))
+		return 1;
	if (sb_any_quota_active(inode->i_sb)) {
		/* Used space is updated in alloc_space() */
-		if (inode->i_sb->dq_op->alloc_space(inode, nr, 0) == NO_QUOTA)
+		if (inode->i_sb->dq_op->alloc_space(inode, nr, 0) == NO_QUOTA) {
+			dl_free_space(inode, nr);
			return 1;
+		}
	} else
		inode_add_bytes(inode, nr);
@@ -197,10 +206,14 @@

 static inline int vfs_dq_reserve_space(struct inode *inode, qsize_t nr)
 {
+	if (dl_reserve_space(inode, nr))
+		return 1;
	if (sb_any_quota_active(inode->i_sb)) {
		/* Used space is updated in alloc_space() */
-		if (inode->i_sb->dq_op->reserve_space(inode, nr, 0) == NO_QUOTA)
+		if (inode->i_sb->dq_op->reserve_space(inode, nr, 0) == NO_QUOTA) {
+			dl_release_space(inode, nr);
			return 1;
+		}
	} else
		inode_add_rsv_space(inode, nr);
@@ -209,10 +222,14 @@

 static inline int vfs_dq_alloc_inode(struct inode *inode)
 {
+	if (dl_alloc_inode(inode))
+		return 1;
	if (sb_any_quota_active(inode->i_sb)) {
		vfs_dq_init(inode);
-		if (inode->i_sb->dq_op->alloc_inode(inode, 1) == NO_QUOTA)
+		if (inode->i_sb->dq_op->alloc_inode(inode, 1) == NO_QUOTA) {
+			dl_free_inode(inode);
			return 1;
+		}
	}
	return 0;
 }
@@ -222,9 +239,13 @@
 */
 static inline int vfs_dq_claim_space(struct inode *inode, qsize_t nr)
 {
+	if (dl_claim_space(inode, nr))
+		return 1;
	if (sb_any_quota_active(inode->i_sb)) {
-		if (inode->i_sb->dq_op->claim_space(inode, nr) == NO_QUOTA)
+		if (inode->i_sb->dq_op->claim_space(inode, nr) == NO_QUOTA) {
+			dl_release_space(inode, nr);
			return 1;
+		}
	} else
		inode_claim_rsv_space(inode, nr);
@@ -242,6 +263,7 @@
		inode->i_sb->dq_op->release_rsv(inode, nr);
	else
		inode_sub_rsv_space(inode, nr);
+	dl_release_space(inode, nr);
 }

 static inline void vfs_dq_free_space_nodirty(struct inode *inode, qsize_t nr)
@@ -250,6 +272,7 @@
		inode->i_sb->dq_op->free_space(inode, nr);
	else
		inode_sub_bytes(inode, nr);
+	dl_free_space(inode, nr);
 }

 static inline void vfs_dq_free_space(struct inode *inode, qsize_t nr)
@@ -262,6 +285,7 @@
 {
	if (sb_any_quota_active(inode->i_sb))
		inode->i_sb->dq_op->free_inode(inode, 1);
+	dl_free_inode(inode);
 }

 /* Cannot be called inside a transaction */
@@ -365,6 +389,8 @@

 static inline int vfs_dq_prealloc_space_nodirty(struct inode *inode, qsize_t nr)
 {
+	if (dl_alloc_space(inode, nr))
+		return 1;
	inode_add_bytes(inode, nr);
	return 0;
 }
@@ -378,6 +404,8 @@

 static inline int vfs_dq_alloc_space_nodirty(struct inode *inode, qsize_t nr)
 {
+	if (dl_alloc_space(inode, nr))
+		return 1;
	inode_add_bytes(inode, nr);
	return 0;
 }
@@ -391,22 +419,28 @@

 static inline int vfs_dq_reserve_space(struct inode *inode, qsize_t nr)
 {
+	if (dl_reserve_space(inode, nr))
+		return 1;
	return 0;
 }

 static inline int vfs_dq_claim_space(struct inode *inode, qsize_t nr)
 {
+	if (dl_claim_space(inode, nr))
+		return 1;
	return vfs_dq_alloc_space(inode, nr);
 }

 static inline
 int vfs_dq_release_reservation_space(struct inode *inode, qsize_t nr)
 {
+	dl_release_space(inode, nr);
	return 0;
 }

 static inline void vfs_dq_free_space_nodirty(struct inode *inode, qsize_t nr)
 {
+	dl_free_space(inode, nr);
	inode_sub_bytes(inode, nr);
 }
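/*
 * [Editor's illustration, not part of the patch.] The dl_* calls spliced
 * into the vfs_dq_* helpers above come from the Linux-VServer disk-limit
 * layer (vs_dlimit.h). The invariant they implement: charge the context's
 * disk limit first, then the regular quota, and roll the disk-limit charge
 * back if quota refuses. Reduced to its control flow, with ex_quota_charge
 * as a hypothetical stand-in for the dq_op callback:
 */
static inline int ex_charge_space(struct inode *inode, long long nr)
{
	if (dl_alloc_space(inode, nr))		/* context over its limit? */
		return 1;			/* fail before touching quota */
	if (ex_quota_charge(inode, nr)) {	/* hypothetical quota call */
		dl_free_space(inode, nr);	/* undo the disk-limit charge */
		return 1;
	}
	return 0;				/* both accountings agree */
}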
diff -Nur kernel-2.6.32.54/linux-2.6.32/include/linux/reboot.h kernel-2.6.32.54.vs/linux-2.6.32/include/linux/reboot.h
--- kernel-2.6.32.54/linux-2.6.32/include/linux/reboot.h	2009-12-03 04:51:21.000000000 +0100
+++ kernel-2.6.32.54.vs/linux-2.6.32/include/linux/reboot.h	2012-01-16 14:51:21.945408603 +0100
@@ -33,6 +33,7 @@
 #define	LINUX_REBOOT_CMD_RESTART2	0xA1B2C3D4
 #define	LINUX_REBOOT_CMD_SW_SUSPEND	0xD000FCE2
 #define	LINUX_REBOOT_CMD_KEXEC		0x45584543
+#define	LINUX_REBOOT_CMD_OOM		0xDEADBEEF

 #ifdef __KERNEL__
diff -Nur kernel-2.6.32.54/linux-2.6.32/include/linux/reiserfs_fs.h kernel-2.6.32.54.vs/linux-2.6.32/include/linux/reiserfs_fs.h
--- kernel-2.6.32.54/linux-2.6.32/include/linux/reiserfs_fs.h	2009-12-03 04:51:21.000000000 +0100
+++ kernel-2.6.32.54.vs/linux-2.6.32/include/linux/reiserfs_fs.h	2012-01-16 14:51:21.949408589 +0100
@@ -899,6 +899,11 @@
 #define REISERFS_COMPR_FL     FS_COMPR_FL
 #define REISERFS_NOTAIL_FL    FS_NOTAIL_FL

+/* unfortunately reiserfs sdattr is only 16 bit */
+#define REISERFS_IXUNLINK_FL  (FS_IXUNLINK_FL >> 16)
+#define REISERFS_BARRIER_FL   (FS_BARRIER_FL >> 16)
+#define REISERFS_COW_FL       (FS_COW_FL >> 16)
+
 /* persistent flags that file inherits from the parent directory */
 #define REISERFS_INHERIT_MASK ( REISERFS_IMMUTABLE_FL |	\
				REISERFS_SYNC_FL |	\
@@ -908,6 +913,9 @@
				REISERFS_COMPR_FL |	\
				REISERFS_NOTAIL_FL )

+#define REISERFS_FL_USER_VISIBLE	0x80FF
+#define REISERFS_FL_USER_MODIFIABLE	0x80FF
+
 /* Stat Data on disk (reiserfs version of UFS disk inode minus the
   address blocks) */
 struct stat_data {
@@ -1989,6 +1997,7 @@
 void sd_attrs_to_i_attrs(__u16 sd_attrs, struct inode *inode);
 void i_attrs_to_sd_attrs(struct inode *inode, __u16 * sd_attrs);
 int reiserfs_setattr(struct dentry *dentry, struct iattr *attr);
+int reiserfs_sync_flags(struct inode *inode, int, int);

 /* namei.c */
 void set_de_name_and_namelen(struct reiserfs_dir_entry *de);
diff -Nur kernel-2.6.32.54/linux-2.6.32/include/linux/reiserfs_fs_sb.h kernel-2.6.32.54.vs/linux-2.6.32/include/linux/reiserfs_fs_sb.h
--- kernel-2.6.32.54/linux-2.6.32/include/linux/reiserfs_fs_sb.h	2009-12-03 04:51:21.000000000 +0100
+++ kernel-2.6.32.54.vs/linux-2.6.32/include/linux/reiserfs_fs_sb.h	2012-01-16 14:51:21.949408589 +0100
@@ -456,6 +456,7 @@
	REISERFS_EXPOSE_PRIVROOT,
	REISERFS_BARRIER_NONE,
	REISERFS_BARRIER_FLUSH,
+	REISERFS_TAGGED,

	/* Actions on error */
	REISERFS_ERROR_PANIC,
diff -Nur kernel-2.6.32.54/linux-2.6.32/include/linux/sched.h kernel-2.6.32.54.vs/linux-2.6.32/include/linux/sched.h
--- kernel-2.6.32.54/linux-2.6.32/include/linux/sched.h	2012-01-16 15:01:39.864725604 +0100
+++ kernel-2.6.32.54.vs/linux-2.6.32/include/linux/sched.h	2012-01-16 14:51:21.953408575 +0100
@@ -389,25 +389,28 @@
 * The mm counters are not protected by its page_table_lock,
 * so must be incremented atomically.
 */
-#define set_mm_counter(mm, member, value) atomic_long_set(&(mm)->_##member, value)
-#define get_mm_counter(mm, member) ((unsigned long)atomic_long_read(&(mm)->_##member))
-#define add_mm_counter(mm, member, value) atomic_long_add(value, &(mm)->_##member)
-#define inc_mm_counter(mm, member) atomic_long_inc(&(mm)->_##member)
-#define dec_mm_counter(mm, member) atomic_long_dec(&(mm)->_##member)
+#define __set_mm_counter(mm, member, value) \
+	atomic_long_set(&(mm)->_##member, value)
+#define get_mm_counter(mm, member) \
+	((unsigned long)atomic_long_read(&(mm)->_##member))

 #else  /* !USE_SPLIT_PTLOCKS */
 /*
 * The mm counters are protected by its page_table_lock,
 * so can be incremented directly.
 */
-#define set_mm_counter(mm, member, value) (mm)->_##member = (value)
+#define __set_mm_counter(mm, member, value) (mm)->_##member = (value)
 #define get_mm_counter(mm, member) ((mm)->_##member)
-#define add_mm_counter(mm, member, value) (mm)->_##member += (value)
-#define inc_mm_counter(mm, member) (mm)->_##member++
-#define dec_mm_counter(mm, member) (mm)->_##member--

 #endif /* !USE_SPLIT_PTLOCKS */

+#define set_mm_counter(mm, member, value) \
+	vx_ ## member ## pages_sub((mm), (get_mm_counter(mm, member) - value))
+#define add_mm_counter(mm, member, value) \
+	vx_ ## member ## pages_add((mm), (value))
+#define inc_mm_counter(mm, member) vx_ ## member ## pages_inc((mm))
+#define dec_mm_counter(mm, member) vx_ ## member ## pages_dec((mm))
+
 #define get_mm_rss(mm)					\
	(get_mm_counter(mm, file_rss) + get_mm_counter(mm, anon_rss))
 #define update_hiwater_rss(mm)	do {			\
@@ -1185,6 +1188,12 @@
	u64 nr_wakeups_affine_attempts;
	u64 nr_wakeups_passive;
	u64 nr_wakeups_idle;
+#ifdef CONFIG_CFS_HARD_LIMITS
+	u64 throttle_start;
+	u64 throttle_max;
+	u64 throttle_count;
+	u64 throttle_sum;
+#endif
 #endif

 #ifdef CONFIG_FAIR_GROUP_SCHED
@@ -1395,6 +1404,14 @@
 #endif
	seccomp_t seccomp;

+/* vserver context data */
+	struct vx_info *vx_info;
+	struct nx_info *nx_info;
+
+	xid_t xid;
+	nid_t nid;
+	tag_t tag;
+
 /* Thread group tracking */
   	u32 parent_exec_id;
   	u32 self_exec_id;
@@ -1619,6 +1636,11 @@
 pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
			struct pid_namespace *ns);

+#include
+#include
+#include
+#include
+
 static inline pid_t task_pid_nr(struct task_struct *tsk)
 {
	return tsk->pid;
@@ -1632,7 +1654,8 @@

 static inline pid_t task_pid_vnr(struct task_struct *tsk)
 {
-	return __task_pid_nr_ns(tsk, PIDTYPE_PID, NULL);
+	// return __task_pid_nr_ns(tsk, PIDTYPE_PID, NULL);
+	return vx_map_pid(__task_pid_nr_ns(tsk, PIDTYPE_PID, NULL));
 }

@@ -1645,7 +1668,7 @@

 static inline pid_t task_tgid_vnr(struct task_struct *tsk)
 {
-	return pid_vnr(task_tgid(tsk));
+	return vx_map_tgid(pid_vnr(task_tgid(tsk)));
 }
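/*
 * [Editor's illustration, not part of the patch.] The vx_<member>pages_*
 * wrappers that now back set/add/inc/dec_mm_counter are defined in the
 * vserver headers (not shown here). Conceptually each update touches the
 * plain mm counter and mirrors the same delta into the owning context's
 * resource accounting. A self-contained toy model of that double
 * bookkeeping, with all ex_* names hypothetical:
 */
struct ex_ctx { long rss_pages; };			/* per-context usage */
struct ex_mm  { long file_rss; struct ex_ctx *ctx; };	/* cf. mm_vx_info */

static inline void ex_file_rsspages_add(struct ex_mm *mm, long n)
{
	mm->file_rss += n;			/* the ordinary mm counter */
	if (mm->ctx)
		mm->ctx->rss_pages += n;	/* mirrored into the context */
}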
diff -Nur kernel-2.6.32.54/linux-2.6.32/include/linux/shmem_fs.h kernel-2.6.32.54.vs/linux-2.6.32/include/linux/shmem_fs.h
--- kernel-2.6.32.54/linux-2.6.32/include/linux/shmem_fs.h	2009-12-03 04:51:21.000000000 +0100
+++ kernel-2.6.32.54.vs/linux-2.6.32/include/linux/shmem_fs.h	2012-01-16 14:51:21.953408575 +0100
@@ -8,6 +8,9 @@

 #define SHMEM_NR_DIRECT 16

+#define TMPFS_SUPER_MAGIC	0x01021994
+
+
 struct shmem_inode_info {
	spinlock_t		lock;
	unsigned long		flags;
diff -Nur kernel-2.6.32.54/linux-2.6.32/include/linux/stat.h kernel-2.6.32.54.vs/linux-2.6.32/include/linux/stat.h
--- kernel-2.6.32.54/linux-2.6.32/include/linux/stat.h	2009-12-03 04:51:21.000000000 +0100
+++ kernel-2.6.32.54.vs/linux-2.6.32/include/linux/stat.h	2012-01-16 14:51:21.953408575 +0100
@@ -66,6 +66,7 @@
	unsigned int	nlink;
	uid_t		uid;
	gid_t		gid;
+	tag_t		tag;
	dev_t		rdev;
	loff_t		size;
	struct timespec  atime;
diff -Nur kernel-2.6.32.54/linux-2.6.32/include/linux/sunrpc/auth.h kernel-2.6.32.54.vs/linux-2.6.32/include/linux/sunrpc/auth.h
--- kernel-2.6.32.54/linux-2.6.32/include/linux/sunrpc/auth.h	2009-12-03 04:51:21.000000000 +0100
+++ kernel-2.6.32.54.vs/linux-2.6.32/include/linux/sunrpc/auth.h	2012-01-16 14:51:21.953408575 +0100
@@ -25,6 +25,7 @@
 struct auth_cred {
	uid_t	uid;
	gid_t	gid;
+	tag_t	tag;
	struct group_info *group_info;
	unsigned char machine_cred : 1;
 };
diff -Nur kernel-2.6.32.54/linux-2.6.32/include/linux/sunrpc/clnt.h kernel-2.6.32.54.vs/linux-2.6.32/include/linux/sunrpc/clnt.h
--- kernel-2.6.32.54/linux-2.6.32/include/linux/sunrpc/clnt.h	2009-12-03 04:51:21.000000000 +0100
+++ kernel-2.6.32.54.vs/linux-2.6.32/include/linux/sunrpc/clnt.h	2012-01-16 14:51:21.953408575 +0100
@@ -49,7 +49,8 @@
	unsigned int		cl_softrtry : 1,/* soft timeouts */
				cl_discrtry : 1,/* disconnect before retry */
				cl_autobind : 1,/* use getport() */
-				cl_chatty   : 1;/* be verbose */
+				cl_chatty   : 1,/* be verbose */
+				cl_tag      : 1;/* context tagging */

	struct rpc_rtt *	cl_rtt;		/* RTO estimator data */
	const struct rpc_timeout *cl_timeout;	/* Timeout strategy */
diff -Nur kernel-2.6.32.54/linux-2.6.32/include/linux/syscalls.h kernel-2.6.32.54.vs/linux-2.6.32/include/linux/syscalls.h
--- kernel-2.6.32.54/linux-2.6.32/include/linux/syscalls.h	2012-01-16 15:01:39.872725575 +0100
+++ kernel-2.6.32.54.vs/linux-2.6.32/include/linux/syscalls.h	2012-01-16 14:51:21.953408575 +0100
@@ -548,6 +548,8 @@
 asmlinkage long sys_unlink(const char __user *pathname);
 asmlinkage long sys_rename(const char __user *oldname,
				const char __user *newname);
+asmlinkage long sys_copyfile(const char __user *from, const char __user *to,
				umode_t mode);
 asmlinkage long sys_chmod(const char __user *filename, mode_t mode);
 asmlinkage long sys_fchmod(unsigned int fd, mode_t mode);
diff -Nur kernel-2.6.32.54/linux-2.6.32/include/linux/sysctl.h kernel-2.6.32.54.vs/linux-2.6.32/include/linux/sysctl.h
--- kernel-2.6.32.54/linux-2.6.32/include/linux/sysctl.h	2012-01-16 15:01:39.872725575 +0100
+++ kernel-2.6.32.54.vs/linux-2.6.32/include/linux/sysctl.h	2012-01-16 14:51:21.953408575 +0100
@@ -69,6 +69,7 @@
	CTL_ABI=9,		/* Binary emulation */
	CTL_CPU=10,		/* CPU stuff (speed scaling, etc) */
	CTL_ARLAN=254,		/* arlan wireless driver */
+	CTL_VSERVER=4242,	/* Linux-VServer debug */
	CTL_S390DBF=5677,	/* s390 debug */
	CTL_SUNRPC=7249,	/* sunrpc debug */
	CTL_PM=9899,		/* frv power management */
@@ -103,6 +104,7 @@

	KERN_PANIC=15,		/* int: panic timeout */
	KERN_REALROOTDEV=16,	/* real root device to mount after initrd */
+	KERN_VSHELPER=17,	/* string: path to vshelper policy agent */

	KERN_SPARC_REBOOT=21,	/* reboot command on Sparc */
	KERN_CTLALTDEL=22,	/* int: allow ctl-alt-del to reboot */
diff -Nur kernel-2.6.32.54/linux-2.6.32/include/linux/sysfs.h kernel-2.6.32.54.vs/linux-2.6.32/include/linux/sysfs.h
--- kernel-2.6.32.54/linux-2.6.32/include/linux/sysfs.h	2009-12-03 04:51:21.000000000 +0100
+++ kernel-2.6.32.54.vs/linux-2.6.32/include/linux/sysfs.h	2012-01-16 14:51:21.953408575 +0100
@@ -17,6 +17,8 @@
 #include
 #include

+#define SYSFS_SUPER_MAGIC	0x62656572
+
 struct kobject;
 struct module;
diff -Nur kernel-2.6.32.54/linux-2.6.32/include/linux/time.h kernel-2.6.32.54.vs/linux-2.6.32/include/linux/time.h
--- kernel-2.6.32.54/linux-2.6.32/include/linux/time.h	2012-01-16 15:01:39.872725575 +0100
+++ kernel-2.6.32.54.vs/linux-2.6.32/include/linux/time.h	2012-01-16 14:51:21.961408547 +0100
@@ -238,6 +238,9 @@
	a->tv_sec += __iter_div_u64_rem(a->tv_nsec + ns, NSEC_PER_SEC, &ns);
	a->tv_nsec = ns;
 }
+
+#include
+
 #endif /* __KERNEL__ */

 #define NFDBITS			__NFDBITS
diff -Nur kernel-2.6.32.54/linux-2.6.32/include/linux/types.h kernel-2.6.32.54.vs/linux-2.6.32/include/linux/types.h
--- kernel-2.6.32.54/linux-2.6.32/include/linux/types.h	2009-12-03 04:51:21.000000000 +0100
+++ kernel-2.6.32.54.vs/linux-2.6.32/include/linux/types.h	2012-01-16 14:51:21.961408547 +0100
@@ -37,6 +37,9 @@
 typedef __kernel_gid32_t	gid_t;
 typedef __kernel_uid16_t        uid16_t;
 typedef __kernel_gid16_t        gid16_t;
+typedef unsigned int		xid_t;
+typedef unsigned int		nid_t;
+typedef unsigned int		tag_t;
tag_t; typedef unsigned long uintptr_t; diff -Nur kernel-2.6.32.54/linux-2.6.32/include/linux/vroot.h kernel-2.6.32.54.vs/linux-2.6.32/include/linux/vroot.h --- kernel-2.6.32.54/linux-2.6.32/include/linux/vroot.h 1970-01-01 01:00:00.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/include/linux/vroot.h 2012-01-16 14:51:21.961408547 +0100 @@ -0,0 +1,51 @@ + +/* + * include/linux/vroot.h + * + * written by Herbert Pötzl, 9/11/2002 + * ported to 2.6 by Herbert Pötzl, 30/12/2004 + * + * Copyright (C) 2002-2007 by Herbert Pötzl. + * Redistribution of this file is permitted under the + * GNU General Public License. + */ + +#ifndef _LINUX_VROOT_H +#define _LINUX_VROOT_H + + +#ifdef __KERNEL__ + +/* Possible states of device */ +enum { + Vr_unbound, + Vr_bound, +}; + +struct vroot_device { + int vr_number; + int vr_refcnt; + + struct semaphore vr_ctl_mutex; + struct block_device *vr_device; + int vr_state; +}; + + +typedef struct block_device *(vroot_grb_func)(struct block_device *); + +extern int register_vroot_grb(vroot_grb_func *); +extern int unregister_vroot_grb(vroot_grb_func *); + +#endif /* __KERNEL__ */ + +#define MAX_VROOT_DEFAULT 8 + +/* + * IOCTL commands --- we will commandeer 0x56 ('V') + */ + +#define VROOT_SET_DEV 0x5600 +#define VROOT_CLR_DEV 0x5601 + +#endif /* _LINUX_VROOT_H */ diff -Nur kernel-2.6.32.54/linux-2.6.32/include/linux/vs_base.h kernel-2.6.32.54.vs/linux-2.6.32/include/linux/vs_base.h --- kernel-2.6.32.54/linux-2.6.32/include/linux/vs_base.h 1970-01-01 01:00:00.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/include/linux/vs_base.h 2012-01-16 14:51:21.961408547 +0100 @@ -0,0 +1,10 @@ +#ifndef _VS_BASE_H +#define _VS_BASE_H + +#include "vserver/base.h" +#include "vserver/check.h" +#include "vserver/debug.h" + +#else +#warning duplicate inclusion +#endif diff -Nur kernel-2.6.32.54/linux-2.6.32/include/linux/vs_context.h kernel-2.6.32.54.vs/linux-2.6.32/include/linux/vs_context.h --- kernel-2.6.32.54/linux-2.6.32/include/linux/vs_context.h 1970-01-01 01:00:00.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/include/linux/vs_context.h 2012-01-16 14:51:21.961408547 +0100 @@ -0,0 +1,242 @@ +#ifndef _VS_CONTEXT_H +#define _VS_CONTEXT_H + +#include "vserver/base.h" +#include "vserver/check.h" +#include "vserver/context.h" +#include "vserver/history.h" +#include "vserver/debug.h" + +#include + + +#define get_vx_info(i) __get_vx_info(i, __FILE__, __LINE__, __HERE__) + +static inline struct vx_info *__get_vx_info(struct vx_info *vxi, + const char *_file, int _line, void *_here) +{ + if (!vxi) + return NULL; + + vxlprintk(VXD_CBIT(xid, 2), "get_vx_info(%p[#%d.%d])", + vxi, vxi ? vxi->vx_id : 0, + vxi ? atomic_read(&vxi->vx_usecnt) : 0, + _file, _line); + __vxh_get_vx_info(vxi, _here); + + atomic_inc(&vxi->vx_usecnt); + return vxi; +} + + +extern void free_vx_info(struct vx_info *); + +#define put_vx_info(i) __put_vx_info(i, __FILE__, __LINE__, __HERE__) + +static inline void __put_vx_info(struct vx_info *vxi, + const char *_file, int _line, void *_here) +{ + if (!vxi) + return; + + vxlprintk(VXD_CBIT(xid, 2), "put_vx_info(%p[#%d.%d])", + vxi, vxi ? vxi->vx_id : 0, + vxi ? 
atomic_read(&vxi->vx_usecnt) : 0, + _file, _line); + __vxh_put_vx_info(vxi, _here); + + if (atomic_dec_and_test(&vxi->vx_usecnt)) + free_vx_info(vxi); +} + + +#define init_vx_info(p, i) \ + __init_vx_info(p, i, __FILE__, __LINE__, __HERE__) + +static inline void __init_vx_info(struct vx_info **vxp, struct vx_info *vxi, + const char *_file, int _line, void *_here) +{ + if (vxi) { + vxlprintk(VXD_CBIT(xid, 3), + "init_vx_info(%p[#%d.%d])", + vxi, vxi ? vxi->vx_id : 0, + vxi ? atomic_read(&vxi->vx_usecnt) : 0, + _file, _line); + __vxh_init_vx_info(vxi, vxp, _here); + + atomic_inc(&vxi->vx_usecnt); + } + *vxp = vxi; +} + + +#define set_vx_info(p, i) \ + __set_vx_info(p, i, __FILE__, __LINE__, __HERE__) + +static inline void __set_vx_info(struct vx_info **vxp, struct vx_info *vxi, + const char *_file, int _line, void *_here) +{ + struct vx_info *vxo; + + if (!vxi) + return; + + vxlprintk(VXD_CBIT(xid, 3), "set_vx_info(%p[#%d.%d])", + vxi, vxi ? vxi->vx_id : 0, + vxi ? atomic_read(&vxi->vx_usecnt) : 0, + _file, _line); + __vxh_set_vx_info(vxi, vxp, _here); + + atomic_inc(&vxi->vx_usecnt); + vxo = xchg(vxp, vxi); + BUG_ON(vxo); +} + + +#define clr_vx_info(p) __clr_vx_info(p, __FILE__, __LINE__, __HERE__) + +static inline void __clr_vx_info(struct vx_info **vxp, + const char *_file, int _line, void *_here) +{ + struct vx_info *vxo; + + vxo = xchg(vxp, NULL); + if (!vxo) + return; + + vxlprintk(VXD_CBIT(xid, 3), "clr_vx_info(%p[#%d.%d])", + vxo, vxo ? vxo->vx_id : 0, + vxo ? atomic_read(&vxo->vx_usecnt) : 0, + _file, _line); + __vxh_clr_vx_info(vxo, vxp, _here); + + if (atomic_dec_and_test(&vxo->vx_usecnt)) + free_vx_info(vxo); +} + + +#define claim_vx_info(v, p) \ + __claim_vx_info(v, p, __FILE__, __LINE__, __HERE__) + +static inline void __claim_vx_info(struct vx_info *vxi, + struct task_struct *task, + const char *_file, int _line, void *_here) +{ + vxlprintk(VXD_CBIT(xid, 3), "claim_vx_info(%p[#%d.%d.%d]) %p", + vxi, vxi ? vxi->vx_id : 0, + vxi ? atomic_read(&vxi->vx_usecnt) : 0, + vxi ? atomic_read(&vxi->vx_tasks) : 0, + task, _file, _line); + __vxh_claim_vx_info(vxi, task, _here); + + atomic_inc(&vxi->vx_tasks); +} + + +extern void unhash_vx_info(struct vx_info *); + +#define release_vx_info(v, p) \ + __release_vx_info(v, p, __FILE__, __LINE__, __HERE__) + +static inline void __release_vx_info(struct vx_info *vxi, + struct task_struct *task, + const char *_file, int _line, void *_here) +{ + vxlprintk(VXD_CBIT(xid, 3), "release_vx_info(%p[#%d.%d.%d]) %p", + vxi, vxi ? vxi->vx_id : 0, + vxi ? atomic_read(&vxi->vx_usecnt) : 0, + vxi ? 
atomic_read(&vxi->vx_tasks) : 0, + task, _file, _line); + __vxh_release_vx_info(vxi, task, _here); + + might_sleep(); + + if (atomic_dec_and_test(&vxi->vx_tasks)) + unhash_vx_info(vxi); +} + + +#define task_get_vx_info(p) \ + __task_get_vx_info(p, __FILE__, __LINE__, __HERE__) + +static inline struct vx_info *__task_get_vx_info(struct task_struct *p, + const char *_file, int _line, void *_here) +{ + struct vx_info *vxi; + + task_lock(p); + vxlprintk(VXD_CBIT(xid, 5), "task_get_vx_info(%p)", + p, _file, _line); + vxi = __get_vx_info(p->vx_info, _file, _line, _here); + task_unlock(p); + return vxi; +} + + +static inline void __wakeup_vx_info(struct vx_info *vxi) +{ + if (waitqueue_active(&vxi->vx_wait)) + wake_up_interruptible(&vxi->vx_wait); +} + + +#define enter_vx_info(v, s) __enter_vx_info(v, s, __FILE__, __LINE__) + +static inline void __enter_vx_info(struct vx_info *vxi, + struct vx_info_save *vxis, const char *_file, int _line) +{ + vxlprintk(VXD_CBIT(xid, 5), "enter_vx_info(%p[#%d],%p) %p[#%d,%p]", + vxi, vxi ? vxi->vx_id : 0, vxis, current, + current->xid, current->vx_info, _file, _line); + vxis->vxi = xchg(&current->vx_info, vxi); + vxis->xid = current->xid; + current->xid = vxi ? vxi->vx_id : 0; +} + +#define leave_vx_info(s) __leave_vx_info(s, __FILE__, __LINE__) + +static inline void __leave_vx_info(struct vx_info_save *vxis, + const char *_file, int _line) +{ + vxlprintk(VXD_CBIT(xid, 5), "leave_vx_info(%p[#%d,%p]) %p[#%d,%p]", + vxis, vxis->xid, vxis->vxi, current, + current->xid, current->vx_info, _file, _line); + (void)xchg(&current->vx_info, vxis->vxi); + current->xid = vxis->xid; +} + + +static inline void __enter_vx_admin(struct vx_info_save *vxis) +{ + vxis->vxi = xchg(&current->vx_info, NULL); + vxis->xid = xchg(&current->xid, (xid_t)0); +} + +static inline void __leave_vx_admin(struct vx_info_save *vxis) +{ + (void)xchg(&current->xid, vxis->xid); + (void)xchg(&current->vx_info, vxis->vxi); +} + +#define task_is_init(p) \ + __task_is_init(p, __FILE__, __LINE__, __HERE__) + +static inline int __task_is_init(struct task_struct *p, + const char *_file, int _line, void *_here) +{ + int is_init = is_global_init(p); + + task_lock(p); + if (p->vx_info) + is_init = p->vx_info->vx_initpid == p->pid; + task_unlock(p); + return is_init; +} + +extern void exit_vx_info(struct task_struct *, int); +extern void exit_vx_info_early(struct task_struct *, int); + + +#else +#warning duplicate inclusion +#endif diff -Nur kernel-2.6.32.54/linux-2.6.32/include/linux/vs_cowbl.h kernel-2.6.32.54.vs/linux-2.6.32/include/linux/vs_cowbl.h --- kernel-2.6.32.54/linux-2.6.32/include/linux/vs_cowbl.h 1970-01-01 01:00:00.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/include/linux/vs_cowbl.h 2012-01-16 14:51:21.961408547 +0100 @@ -0,0 +1,47 @@ +#ifndef _VS_COWBL_H +#define _VS_COWBL_H + +#include +#include +#include + +extern struct dentry *cow_break_link(const char *pathname); + +static inline int cow_check_and_break(struct path *path) +{ + struct inode *inode = path->dentry->d_inode; + int error = 0; + + /* do we need this check? */
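	/* most likely yes: breaking a COW link below writes to the
	 * filesystem (cow_break_link() creates a private copy), so
	 * refusing early on a read-only mount is the safe choice */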
+ if (IS_RDONLY(inode)) + return -EROFS; + + if (IS_COW(inode)) { + if (IS_COW_LINK(inode)) { + struct dentry *new_dentry, *old_dentry = path->dentry; + char *pp, *buf; + + buf = kmalloc(PATH_MAX, GFP_KERNEL); + if (!buf) { + return -ENOMEM; + } + pp = d_path(path, buf, PATH_MAX); + new_dentry = cow_break_link(pp); + kfree(buf); + if (!IS_ERR(new_dentry)) { + path->dentry = new_dentry; + dput(old_dentry); + } else + error = PTR_ERR(new_dentry); + } else { + inode->i_flags &= ~(S_IXUNLINK | S_IMMUTABLE); + inode->i_ctime = CURRENT_TIME; + mark_inode_dirty(inode); + } + } + return error; +} + +#else +#warning duplicate inclusion +#endif diff -Nur kernel-2.6.32.54/linux-2.6.32/include/linux/vs_cvirt.h kernel-2.6.32.54.vs/linux-2.6.32/include/linux/vs_cvirt.h --- kernel-2.6.32.54/linux-2.6.32/include/linux/vs_cvirt.h 1970-01-01 01:00:00.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/include/linux/vs_cvirt.h 2012-01-16 14:51:21.961408547 +0100 @@ -0,0 +1,50 @@ +#ifndef _VS_CVIRT_H +#define _VS_CVIRT_H + +#include "vserver/cvirt.h" +#include "vserver/context.h" +#include "vserver/base.h" +#include "vserver/check.h" +#include "vserver/debug.h" + + +static inline void vx_activate_task(struct task_struct *p) +{ + struct vx_info *vxi; + + if ((vxi = p->vx_info)) { + vx_update_load(vxi); + atomic_inc(&vxi->cvirt.nr_running); + } +} + +static inline void vx_deactivate_task(struct task_struct *p) +{ + struct vx_info *vxi; + + if ((vxi = p->vx_info)) { + vx_update_load(vxi); + atomic_dec(&vxi->cvirt.nr_running); + } +} + +static inline void vx_uninterruptible_inc(struct task_struct *p) +{ + struct vx_info *vxi; + + if ((vxi = p->vx_info)) + atomic_inc(&vxi->cvirt.nr_uninterruptible); +} + +static inline void vx_uninterruptible_dec(struct task_struct *p) +{ + struct vx_info *vxi; + + if ((vxi = p->vx_info)) + atomic_dec(&vxi->cvirt.nr_uninterruptible); +} + + +#else +#warning duplicate inclusion +#endif diff -Nur kernel-2.6.32.54/linux-2.6.32/include/linux/vs_device.h kernel-2.6.32.54.vs/linux-2.6.32/include/linux/vs_device.h --- kernel-2.6.32.54/linux-2.6.32/include/linux/vs_device.h 1970-01-01 01:00:00.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/include/linux/vs_device.h 2012-01-16 14:51:21.961408547 +0100 @@ -0,0 +1,45 @@ +#ifndef _VS_DEVICE_H +#define _VS_DEVICE_H + +#include "vserver/base.h" +#include "vserver/device.h" +#include "vserver/debug.h" + + +#ifdef CONFIG_VSERVER_DEVICE + +int vs_map_device(struct vx_info *, dev_t, dev_t *, umode_t); + +#define vs_device_perm(v, d, m, p) \ + ((vs_map_device(current_vx_info(), d, NULL, m) & (p)) == (p)) + +#else + +static inline +int vs_map_device(struct vx_info *vxi, + dev_t device, dev_t *target, umode_t mode) +{ + if (target) + *target = device; + return ~0; +} + +#define vs_device_perm(v, d, m, p) ((p) == (p)) + +#endif + + +#define vs_map_chrdev(d, t, p) \ + ((vs_map_device(current_vx_info(), d, t, S_IFCHR) & (p)) == (p)) +#define vs_map_blkdev(d, t, p) \ + ((vs_map_device(current_vx_info(), d, t, S_IFBLK) & (p)) == (p)) + +#define vs_chrdev_perm(d, p) \ + vs_device_perm(current_vx_info(), d, S_IFCHR, p) +#define vs_blkdev_perm(d, p) \ + vs_device_perm(current_vx_info(), d, S_IFBLK, p) + + +#else +#warning duplicate inclusion +#endif
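The helpers above are the intended entry points for device-node access checks: vs_map_chrdev()/vs_map_blkdev() map a device number for the current context and test the requested DATTR_* permission bits in one step. A minimal caller sketch (open_mapped_chrdev() and do_open_by_devt() are illustrative names, not part of this patch):

	static int open_mapped_chrdev(dev_t dev)
	{
		dev_t target;

		/* map dev for the current context, check open permission */
		if (!vs_map_chrdev(dev, &target, DATTR_OPEN))
			return -EPERM;

		/* target now holds the possibly remapped device number */
		return do_open_by_devt(target);	/* illustrative stand-in */
	}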
diff -Nur kernel-2.6.32.54/linux-2.6.32/include/linux/vs_dlimit.h kernel-2.6.32.54.vs/linux-2.6.32/include/linux/vs_dlimit.h --- kernel-2.6.32.54/linux-2.6.32/include/linux/vs_dlimit.h 1970-01-01 01:00:00.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/include/linux/vs_dlimit.h 2012-01-16 14:51:21.961408547 +0100 @@ -0,0 +1,215 @@ +#ifndef _VS_DLIMIT_H +#define _VS_DLIMIT_H + +#include + +#include "vserver/dlimit.h" +#include "vserver/base.h" +#include "vserver/debug.h" + + +#define get_dl_info(i) __get_dl_info(i, __FILE__, __LINE__) + +static inline struct dl_info *__get_dl_info(struct dl_info *dli, + const char *_file, int _line) +{ + if (!dli) + return NULL; + vxlprintk(VXD_CBIT(dlim, 4), "get_dl_info(%p[#%d.%d])", + dli, dli ? dli->dl_tag : 0, + dli ? atomic_read(&dli->dl_usecnt) : 0, + _file, _line); + atomic_inc(&dli->dl_usecnt); + return dli; +} + + +#define free_dl_info(i) \ + call_rcu(&(i)->dl_rcu, rcu_free_dl_info) + +#define put_dl_info(i) __put_dl_info(i, __FILE__, __LINE__) + +static inline void __put_dl_info(struct dl_info *dli, + const char *_file, int _line) +{ + if (!dli) + return; + vxlprintk(VXD_CBIT(dlim, 4), "put_dl_info(%p[#%d.%d])", + dli, dli ? dli->dl_tag : 0, + dli ? atomic_read(&dli->dl_usecnt) : 0, + _file, _line); + if (atomic_dec_and_test(&dli->dl_usecnt)) + free_dl_info(dli); +} + + +#define __dlimit_char(d) ((d) ? '*' : ' ') + +static inline int __dl_alloc_space(struct super_block *sb, + tag_t tag, dlsize_t nr, const char *file, int line) +{ + struct dl_info *dli = NULL; + int ret = 0; + + if (nr == 0) + goto out; + dli = locate_dl_info(sb, tag); + if (!dli) + goto out; + + spin_lock(&dli->dl_lock); + ret = (dli->dl_space_used + nr > dli->dl_space_total); + if (!ret) + dli->dl_space_used += nr; + spin_unlock(&dli->dl_lock); + put_dl_info(dli); +out: + vxlprintk(VXD_CBIT(dlim, 1), + "ALLOC (%p,#%d)%c %lld bytes (%d)", + sb, tag, __dlimit_char(dli), (long long)nr, + ret, file, line); + return ret; +} + +static inline void __dl_free_space(struct super_block *sb, + tag_t tag, dlsize_t nr, const char *_file, int _line) +{ + struct dl_info *dli = NULL; + + if (nr == 0) + goto out; + dli = locate_dl_info(sb, tag); + if (!dli) + goto out; + + spin_lock(&dli->dl_lock); + if (dli->dl_space_used > nr) + dli->dl_space_used -= nr; + else + dli->dl_space_used = 0; + spin_unlock(&dli->dl_lock); + put_dl_info(dli); +out: + vxlprintk(VXD_CBIT(dlim, 1), + "FREE (%p,#%d)%c %lld bytes", + sb, tag, __dlimit_char(dli), (long long)nr, + _file, _line); +} + +static inline int __dl_alloc_inode(struct super_block *sb, + tag_t tag, const char *_file, int _line) +{ + struct dl_info *dli; + int ret = 0; + + dli = locate_dl_info(sb, tag); + if (!dli) + goto out; + + spin_lock(&dli->dl_lock); + dli->dl_inodes_used++; + ret = (dli->dl_inodes_used > dli->dl_inodes_total); + spin_unlock(&dli->dl_lock); + put_dl_info(dli); +out: + vxlprintk(VXD_CBIT(dlim, 0), + "ALLOC (%p,#%d)%c inode (%d)", + sb, tag, __dlimit_char(dli), ret, _file, _line); + return ret; +} + +static inline void __dl_free_inode(struct super_block *sb, + tag_t tag, const char *_file, int _line) +{ + struct dl_info *dli; + + dli = locate_dl_info(sb, tag); + if (!dli) + goto out; + + spin_lock(&dli->dl_lock); + if (dli->dl_inodes_used > 1) + dli->dl_inodes_used--; + else + dli->dl_inodes_used = 0; + spin_unlock(&dli->dl_lock); + put_dl_info(dli); +out: + vxlprintk(VXD_CBIT(dlim, 0), + "FREE (%p,#%d)%c inode", + sb, tag, __dlimit_char(dli), _file, _line); +} + +static inline void __dl_adjust_block(struct super_block *sb, tag_t tag, + unsigned long long *free_blocks, unsigned long long *root_blocks, + const char *_file, int _line) +{ + struct dl_info *dli; + uint64_t broot, bfree; + + dli = locate_dl_info(sb, tag); + if (!dli) + return; + + spin_lock(&dli->dl_lock); + broot = (dli->dl_space_total - 
(dli->dl_space_total >> 10) * dli->dl_nrlmult) + >> sb->s_blocksize_bits; + bfree = (dli->dl_space_total - dli->dl_space_used) + >> sb->s_blocksize_bits; + spin_unlock(&dli->dl_lock); + + vxlprintk(VXD_CBIT(dlim, 2), + "ADJUST: %lld,%lld on %lld,%lld [mult=%d]", + (long long)bfree, (long long)broot, + *free_blocks, *root_blocks, dli->dl_nrlmult, + _file, _line); + if (free_blocks) { + if (*free_blocks > bfree) + *free_blocks = bfree; + } + if (root_blocks) { + if (*root_blocks > broot) + *root_blocks = broot; + } + put_dl_info(dli); +} + +#define dl_prealloc_space(in, bytes) \ + __dl_alloc_space((in)->i_sb, (in)->i_tag, (dlsize_t)(bytes), \ + __FILE__, __LINE__ ) + +#define dl_alloc_space(in, bytes) \ + __dl_alloc_space((in)->i_sb, (in)->i_tag, (dlsize_t)(bytes), \ + __FILE__, __LINE__ ) + +#define dl_reserve_space(in, bytes) \ + __dl_alloc_space((in)->i_sb, (in)->i_tag, (dlsize_t)(bytes), \ + __FILE__, __LINE__ ) + +#define dl_claim_space(in, bytes) (0) + +#define dl_release_space(in, bytes) \ + __dl_free_space((in)->i_sb, (in)->i_tag, (dlsize_t)(bytes), \ + __FILE__, __LINE__ ) + +#define dl_free_space(in, bytes) \ + __dl_free_space((in)->i_sb, (in)->i_tag, (dlsize_t)(bytes), \ + __FILE__, __LINE__ ) + + + +#define dl_alloc_inode(in) \ + __dl_alloc_inode((in)->i_sb, (in)->i_tag, __FILE__, __LINE__ ) + +#define dl_free_inode(in) \ + __dl_free_inode((in)->i_sb, (in)->i_tag, __FILE__, __LINE__ ) + + +#define dl_adjust_block(sb, tag, fb, rb) \ + __dl_adjust_block(sb, tag, fb, rb, __FILE__, __LINE__ ) + + +#else +#warning duplicate inclusion +#endif diff -Nur kernel-2.6.32.54/linux-2.6.32/include/linux/vserver/base.h kernel-2.6.32.54.vs/linux-2.6.32/include/linux/vserver/base.h --- kernel-2.6.32.54/linux-2.6.32/include/linux/vserver/base.h 1970-01-01 01:00:00.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/include/linux/vserver/base.h 2012-01-16 14:51:21.969408519 +0100 @@ -0,0 +1,170 @@ +#ifndef _VX_BASE_H +#define _VX_BASE_H + + +/* context state changes */ + +enum { + VSC_STARTUP = 1, + VSC_SHUTDOWN, + + VSC_NETUP, + VSC_NETDOWN, +}; + + + +#define vx_task_xid(t) ((t)->xid) + +#define vx_current_xid() vx_task_xid(current) + +#define current_vx_info() (current->vx_info) + + +#define nx_task_nid(t) ((t)->nid) + +#define nx_current_nid() nx_task_nid(current) + +#define current_nx_info() (current->nx_info) + + +/* generic flag merging */ + +#define vs_check_flags(v, m, f) (((v) & (m)) ^ (f)) + +#define vs_mask_flags(v, f, m) (((v) & ~(m)) | ((f) & (m))) + +#define vs_mask_mask(v, f, m) (((v) & ~(m)) | ((v) & (f) & (m))) + +#define vs_check_bit(v, n) ((v) & (1LL << (n))) + + +/* context flags */ + +#define __vx_flags(v) ((v) ? (v)->vx_flags : 0) + +#define vx_current_flags() __vx_flags(current_vx_info()) + +#define vx_info_flags(v, m, f) \ + vs_check_flags(__vx_flags(v), m, f) + +#define task_vx_flags(t, m, f) \ + ((t) && vx_info_flags((t)->vx_info, m, f)) + +#define vx_flags(m, f) vx_info_flags(current_vx_info(), m, f) + + +/* context caps */ + +#define __vx_ccaps(v) ((v) ? (v)->vx_ccaps : 0) + +#define vx_current_ccaps() __vx_ccaps(current_vx_info()) + +#define vx_info_ccaps(v, c) (__vx_ccaps(v) & (c)) + +#define vx_ccaps(c) vx_info_ccaps(current_vx_info(), (c)) + + + +/* network flags */ + +#define __nx_flags(n) ((n) ? 
(n)->nx_flags : 0) + +#define nx_current_flags() __nx_flags(current_nx_info()) + +#define nx_info_flags(n, m, f) \ + vs_check_flags(__nx_flags(n), m, f) + +#define task_nx_flags(t, m, f) \ + ((t) && nx_info_flags((t)->nx_info, m, f)) + +#define nx_flags(m, f) nx_info_flags(current_nx_info(), m, f) + + +/* network caps */ + +#define __nx_ncaps(n) ((n) ? (n)->nx_ncaps : 0) + +#define nx_current_ncaps() __nx_ncaps(current_nx_info()) + +#define nx_info_ncaps(n, c) (__nx_ncaps(n) & (c)) + +#define nx_ncaps(c) nx_info_ncaps(current_nx_info(), c) + + +/* context mask capabilities */ + +#define __vx_mcaps(v) ((v) ? (v)->vx_ccaps >> 32UL : ~0 ) + +#define vx_info_mcaps(v, c) (__vx_mcaps(v) & (c)) + +#define vx_mcaps(c) vx_info_mcaps(current_vx_info(), c) + + +/* context bcap mask */ + +#define __vx_bcaps(v) ((v)->vx_bcaps) + +#define vx_current_bcaps() __vx_bcaps(current_vx_info()) + + +/* mask given bcaps */ + +#define vx_info_mbcaps(v, c) ((v) ? cap_intersect(__vx_bcaps(v), c) : c) + +#define vx_mbcaps(c) vx_info_mbcaps(current_vx_info(), c) + + +/* masked cap_bset */ + +#define vx_info_cap_bset(v) vx_info_mbcaps(v, current->cap_bset) + +#define vx_current_cap_bset() vx_info_cap_bset(current_vx_info()) + +#if 0 +#define vx_info_mbcap(v, b) \ + (!vx_info_flags(v, VXF_STATE_SETUP, 0) ? \ + vx_info_bcaps(v, b) : (b)) + +#define task_vx_mbcap(t, b) \ + vx_info_mbcap((t)->vx_info, (t)->b) + +#define vx_mbcap(b) task_vx_mbcap(current, b) +#endif + +#define vx_cap_raised(v, c, f) cap_raised(vx_info_mbcaps(v, c), f) + +#define vx_capable(b, c) (capable(b) || \ + (cap_raised(current_cap(), b) && vx_ccaps(c))) + +#define nx_capable(b, c) (capable(b) || \ + (cap_raised(current_cap(), b) && nx_ncaps(c))) + +#define vx_task_initpid(t, n) \ + ((t)->vx_info && \ + ((t)->vx_info->vx_initpid == (n))) + +#define vx_current_initpid(n) vx_task_initpid(current, n) + + +/* context unshare mask */ + +#define __vx_umask(v) ((v)->vx_umask) + +#define vx_current_umask() __vx_umask(current_vx_info()) + +#define vx_can_unshare(b, f) (capable(b) || \ + (cap_raised(current_cap(), b) && \ + !((f) & ~vx_current_umask()))) + + +#define __vx_state(v) ((v) ? ((v)->vx_state) : 0) + +#define vx_info_state(v, m) (__vx_state(v) & (m)) + + +#define __nx_state(n) ((n) ? 
((n)->nx_state) : 0) + +#define nx_info_state(n, m) (__nx_state(n) & (m)) + +#endif diff -Nur kernel-2.6.32.54/linux-2.6.32/include/linux/vserver/cacct_cmd.h kernel-2.6.32.54.vs/linux-2.6.32/include/linux/vserver/cacct_cmd.h --- kernel-2.6.32.54/linux-2.6.32/include/linux/vserver/cacct_cmd.h 1970-01-01 01:00:00.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/include/linux/vserver/cacct_cmd.h 2012-01-16 14:51:21.973408505 +0100 @@ -0,0 +1,23 @@ +#ifndef _VX_CACCT_CMD_H +#define _VX_CACCT_CMD_H + + +/* virtual host info name commands */ + +#define VCMD_sock_stat VC_CMD(VSTAT, 5, 0) + +struct vcmd_sock_stat_v0 { + uint32_t field; + uint32_t count[3]; + uint64_t total[3]; +}; + + +#ifdef __KERNEL__ + +#include + +extern int vc_sock_stat(struct vx_info *, void __user *); + +#endif /* __KERNEL__ */ +#endif /* _VX_CACCT_CMD_H */ diff -Nur kernel-2.6.32.54/linux-2.6.32/include/linux/vserver/cacct_def.h kernel-2.6.32.54.vs/linux-2.6.32/include/linux/vserver/cacct_def.h --- kernel-2.6.32.54/linux-2.6.32/include/linux/vserver/cacct_def.h 1970-01-01 01:00:00.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/include/linux/vserver/cacct_def.h 2012-01-16 14:51:21.973408505 +0100 @@ -0,0 +1,43 @@ +#ifndef _VX_CACCT_DEF_H +#define _VX_CACCT_DEF_H + +#include +#include + + +struct _vx_sock_acc { + atomic_long_t count; + atomic_long_t total; +}; + +/* context sub struct */ + +struct _vx_cacct { + struct _vx_sock_acc sock[VXA_SOCK_SIZE][3]; + atomic_t slab[8]; + atomic_t page[6][8]; +}; + +#ifdef CONFIG_VSERVER_DEBUG + +static inline void __dump_vx_cacct(struct _vx_cacct *cacct) +{ + int i, j; + + printk("\t_vx_cacct:"); + for (i = 0; i < 6; i++) { + struct _vx_sock_acc *ptr = cacct->sock[i]; + + printk("\t [%d] =", i); + for (j = 0; j < 3; j++) { + printk(" [%d] = %8lu, %8lu", j, + atomic_long_read(&ptr[j].count), + atomic_long_read(&ptr[j].total)); + } + printk("\n"); + } +} + +#endif + +#endif /* _VX_CACCT_DEF_H */ diff -Nur kernel-2.6.32.54/linux-2.6.32/include/linux/vserver/cacct.h kernel-2.6.32.54.vs/linux-2.6.32/include/linux/vserver/cacct.h --- kernel-2.6.32.54/linux-2.6.32/include/linux/vserver/cacct.h 1970-01-01 01:00:00.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/include/linux/vserver/cacct.h 2012-01-16 14:51:21.973408505 +0100 @@ -0,0 +1,15 @@ +#ifndef _VX_CACCT_H +#define _VX_CACCT_H + + +enum sock_acc_field { + VXA_SOCK_UNSPEC = 0, + VXA_SOCK_UNIX, + VXA_SOCK_INET, + VXA_SOCK_INET6, + VXA_SOCK_PACKET, + VXA_SOCK_OTHER, + VXA_SOCK_SIZE /* array size */ +}; + +#endif /* _VX_CACCT_H */ diff -Nur kernel-2.6.32.54/linux-2.6.32/include/linux/vserver/cacct_int.h kernel-2.6.32.54.vs/linux-2.6.32/include/linux/vserver/cacct_int.h --- kernel-2.6.32.54/linux-2.6.32/include/linux/vserver/cacct_int.h 1970-01-01 01:00:00.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/include/linux/vserver/cacct_int.h 2012-01-16 14:51:21.973408505 +0100 @@ -0,0 +1,21 @@ +#ifndef _VX_CACCT_INT_H +#define _VX_CACCT_INT_H + + +#ifdef __KERNEL__ + +static inline +unsigned long vx_sock_count(struct _vx_cacct *cacct, int type, int pos) +{ + return atomic_long_read(&cacct->sock[type][pos].count); +} + + +static inline +unsigned long vx_sock_total(struct _vx_cacct *cacct, int type, int pos) +{ + return atomic_long_read(&cacct->sock[type][pos].total); +} + +#endif /* __KERNEL__ */ +#endif /* _VX_CACCT_INT_H */ diff -Nur kernel-2.6.32.54/linux-2.6.32/include/linux/vserver/check.h kernel-2.6.32.54.vs/linux-2.6.32/include/linux/vserver/check.h --- kernel-2.6.32.54/linux-2.6.32/include/linux/vserver/check.h 
1970-01-01 01:00:00.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/include/linux/vserver/check.h 2012-01-16 14:51:21.973408505 +0100 @@ -0,0 +1,89 @@ +#ifndef _VS_CHECK_H +#define _VS_CHECK_H + + +#define MAX_S_CONTEXT 65535 /* Arbitrary limit */ + +#ifdef CONFIG_VSERVER_DYNAMIC_IDS +#define MIN_D_CONTEXT 49152 /* dynamic contexts start here */ +#else +#define MIN_D_CONTEXT 65536 +#endif + +/* check conditions */ + +#define VS_ADMIN 0x0001 +#define VS_WATCH 0x0002 +#define VS_HIDE 0x0004 +#define VS_HOSTID 0x0008 + +#define VS_IDENT 0x0010 +#define VS_EQUIV 0x0020 +#define VS_PARENT 0x0040 +#define VS_CHILD 0x0080 + +#define VS_ARG_MASK 0x00F0 + +#define VS_DYNAMIC 0x0100 +#define VS_STATIC 0x0200 + +#define VS_ATR_MASK 0x0F00 + +#ifdef CONFIG_VSERVER_PRIVACY +#define VS_ADMIN_P (0) +#define VS_WATCH_P (0) +#else +#define VS_ADMIN_P VS_ADMIN +#define VS_WATCH_P VS_WATCH +#endif + +#define VS_HARDIRQ 0x1000 +#define VS_SOFTIRQ 0x2000 +#define VS_IRQ 0x4000 + +#define VS_IRQ_MASK 0xF000 + +#include + +/* + * check current context for ADMIN/WATCH and + * optionally against supplied argument + */ +static inline int __vs_check(int cid, int id, unsigned int mode) +{ + if (mode & VS_ARG_MASK) { + if ((mode & VS_IDENT) && (id == cid)) + return 1; + } + if (mode & VS_ATR_MASK) { + if ((mode & VS_DYNAMIC) && + (id >= MIN_D_CONTEXT) && + (id <= MAX_S_CONTEXT)) + return 1; + if ((mode & VS_STATIC) && + (id > 1) && (id < MIN_D_CONTEXT)) + return 1; + } + if (mode & VS_IRQ_MASK) { + if ((mode & VS_IRQ) && unlikely(in_interrupt())) + return 1; + if ((mode & VS_HARDIRQ) && unlikely(in_irq())) + return 1; + if ((mode & VS_SOFTIRQ) && unlikely(in_softirq())) + return 1; + } + return (((mode & VS_ADMIN) && (cid == 0)) || + ((mode & VS_WATCH) && (cid == 1)) || + ((mode & VS_HOSTID) && (id == 0))); +} + +#define vx_check(c, m) __vs_check(vx_current_xid(), c, (m) | VS_IRQ) + +#define vx_weak_check(c, m) ((m) ? vx_check(c, m) : 1) + + +#define nx_check(c, m) __vs_check(nx_current_nid(), c, m) + +#define nx_weak_check(c, m) ((m) ? 
nx_check(c, m) : 1) + +#endif diff -Nur kernel-2.6.32.54/linux-2.6.32/include/linux/vserver/context_cmd.h kernel-2.6.32.54.vs/linux-2.6.32/include/linux/vserver/context_cmd.h --- kernel-2.6.32.54/linux-2.6.32/include/linux/vserver/context_cmd.h 1970-01-01 01:00:00.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/include/linux/vserver/context_cmd.h 2012-01-16 14:51:21.973408505 +0100 @@ -0,0 +1,145 @@ +#ifndef _VX_CONTEXT_CMD_H +#define _VX_CONTEXT_CMD_H + + +/* vinfo commands */ + +#define VCMD_task_xid VC_CMD(VINFO, 1, 0) + +#ifdef __KERNEL__ +extern int vc_task_xid(uint32_t); + +#endif /* __KERNEL__ */ + +#define VCMD_vx_info VC_CMD(VINFO, 5, 0) + +struct vcmd_vx_info_v0 { + uint32_t xid; + uint32_t initpid; + /* more to come */ +}; + +#ifdef __KERNEL__ +extern int vc_vx_info(struct vx_info *, void __user *); + +#endif /* __KERNEL__ */ + +#define VCMD_ctx_stat VC_CMD(VSTAT, 0, 0) + +struct vcmd_ctx_stat_v0 { + uint32_t usecnt; + uint32_t tasks; + /* more to come */ +}; + +#ifdef __KERNEL__ +extern int vc_ctx_stat(struct vx_info *, void __user *); + +#endif /* __KERNEL__ */ + +/* context commands */ + +#define VCMD_ctx_create_v0 VC_CMD(VPROC, 1, 0) +#define VCMD_ctx_create VC_CMD(VPROC, 1, 1) + +struct vcmd_ctx_create { + uint64_t flagword; +}; + +#define VCMD_ctx_migrate_v0 VC_CMD(PROCMIG, 1, 0) +#define VCMD_ctx_migrate VC_CMD(PROCMIG, 1, 1) + +struct vcmd_ctx_migrate { + uint64_t flagword; +}; + +#ifdef __KERNEL__ +extern int vc_ctx_create(uint32_t, void __user *); +extern int vc_ctx_migrate(struct vx_info *, void __user *); + +#endif /* __KERNEL__ */ + + +/* flag commands */ + +#define VCMD_get_cflags VC_CMD(FLAGS, 1, 0) +#define VCMD_set_cflags VC_CMD(FLAGS, 2, 0) + +struct vcmd_ctx_flags_v0 { + uint64_t flagword; + uint64_t mask; +}; + +#ifdef __KERNEL__ +extern int vc_get_cflags(struct vx_info *, void __user *); +extern int vc_set_cflags(struct vx_info *, void __user *); + +#endif /* __KERNEL__ */ + + +/* context caps commands */ + +#define VCMD_get_ccaps VC_CMD(FLAGS, 3, 1) +#define VCMD_set_ccaps VC_CMD(FLAGS, 4, 1) + +struct vcmd_ctx_caps_v1 { + uint64_t ccaps; + uint64_t cmask; +}; + +#ifdef __KERNEL__ +extern int vc_get_ccaps(struct vx_info *, void __user *); +extern int vc_set_ccaps(struct vx_info *, void __user *); + +#endif /* __KERNEL__ */ + + +/* bcaps commands */ + +#define VCMD_get_bcaps VC_CMD(FLAGS, 9, 0) +#define VCMD_set_bcaps VC_CMD(FLAGS, 10, 0) + +struct vcmd_bcaps { + uint64_t bcaps; + uint64_t bmask; +}; + +#ifdef __KERNEL__ +extern int vc_get_bcaps(struct vx_info *, void __user *); +extern int vc_set_bcaps(struct vx_info *, void __user *); + +#endif /* __KERNEL__ */ + + +/* umask commands */ + +#define VCMD_get_umask VC_CMD(FLAGS, 13, 0) +#define VCMD_set_umask VC_CMD(FLAGS, 14, 0) + +struct vcmd_umask { + uint64_t umask; + uint64_t mask; +}; + +#ifdef __KERNEL__ +extern int vc_get_umask(struct vx_info *, void __user *); +extern int vc_set_umask(struct vx_info *, void __user *); + +#endif /* __KERNEL__ */ + + +/* OOM badness */ + +#define VCMD_get_badness VC_CMD(MEMCTRL, 5, 0) +#define VCMD_set_badness VC_CMD(MEMCTRL, 6, 0) + +struct vcmd_badness_v0 { + int64_t bias; +}; + +#ifdef __KERNEL__ +extern int vc_get_badness(struct vx_info *, void __user *); +extern int vc_set_badness(struct vx_info *, void __user *); + +#endif /* __KERNEL__ */ +#endif /* _VX_CONTEXT_CMD_H */ diff -Nur kernel-2.6.32.54/linux-2.6.32/include/linux/vserver/context.h kernel-2.6.32.54.vs/linux-2.6.32/include/linux/vserver/context.h --- 
kernel-2.6.32.54/linux-2.6.32/include/linux/vserver/context.h 1970-01-01 01:00:00.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/include/linux/vserver/context.h 2012-01-16 14:51:21.973408505 +0100 @@ -0,0 +1,184 @@ +#ifndef _VX_CONTEXT_H +#define _VX_CONTEXT_H + +#include +#include + + +/* context flags */ + +#define VXF_INFO_SCHED 0x00000002 +#define VXF_INFO_NPROC 0x00000004 +#define VXF_INFO_PRIVATE 0x00000008 + +#define VXF_INFO_INIT 0x00000010 +#define VXF_INFO_HIDE 0x00000020 +#define VXF_INFO_ULIMIT 0x00000040 +#define VXF_INFO_NSPACE 0x00000080 + +#define VXF_SCHED_HARD 0x00000100 +#define VXF_SCHED_PRIO 0x00000200 +#define VXF_SCHED_PAUSE 0x00000400 + +#define VXF_VIRT_MEM 0x00010000 +#define VXF_VIRT_UPTIME 0x00020000 +#define VXF_VIRT_CPU 0x00040000 +#define VXF_VIRT_LOAD 0x00080000 +#define VXF_VIRT_TIME 0x00100000 + +#define VXF_HIDE_MOUNT 0x01000000 +/* was VXF_HIDE_NETIF 0x02000000 */ +#define VXF_HIDE_VINFO 0x04000000 + +#define VXF_STATE_SETUP (1ULL << 32) +#define VXF_STATE_INIT (1ULL << 33) +#define VXF_STATE_ADMIN (1ULL << 34) + +#define VXF_SC_HELPER (1ULL << 36) +#define VXF_REBOOT_KILL (1ULL << 37) +#define VXF_PERSISTENT (1ULL << 38) + +#define VXF_FORK_RSS (1ULL << 48) +#define VXF_PROLIFIC (1ULL << 49) + +#define VXF_IGNEG_NICE (1ULL << 52) + +#define VXF_ONE_TIME (0x0007ULL << 32) + +#define VXF_INIT_SET (VXF_STATE_SETUP | VXF_STATE_INIT | VXF_STATE_ADMIN) + + +/* context migration */ + +#define VXM_SET_INIT 0x00000001 +#define VXM_SET_REAPER 0x00000002 + +/* context caps */ + +#define VXC_CAP_MASK 0x00000000 + +#define VXC_SET_UTSNAME 0x00000001 +#define VXC_SET_RLIMIT 0x00000002 +#define VXC_FS_SECURITY 0x00000004 +#define VXC_FS_TRUSTED 0x00000008 +#define VXC_TIOCSTI 0x00000010 + +/* was VXC_RAW_ICMP 0x00000100 */ +#define VXC_SYSLOG 0x00001000 +#define VXC_OOM_ADJUST 0x00002000 +#define VXC_AUDIT_CONTROL 0x00004000 + +#define VXC_SECURE_MOUNT 0x00010000 +#define VXC_SECURE_REMOUNT 0x00020000 +#define VXC_BINARY_MOUNT 0x00040000 + +#define VXC_QUOTA_CTL 0x00100000 +#define VXC_ADMIN_MAPPER 0x00200000 +#define VXC_ADMIN_CLOOP 0x00400000 + +#define VXC_KTHREAD 0x01000000 +#define VXC_NAMESPACE 0x02000000 + + +#ifdef __KERNEL__ + +#include +#include +#include + +#include "limit_def.h" +#include "sched_def.h" +#include "cvirt_def.h" +#include "cacct_def.h" +#include "device_def.h" + +#define VX_SPACES 2 + +struct _vx_info_pc { + struct _vx_sched_pc sched_pc; + struct _vx_cvirt_pc cvirt_pc; +}; + +struct vx_info { + struct hlist_node vx_hlist; /* linked list of contexts */ + xid_t vx_id; /* context id */ + atomic_t vx_usecnt; /* usage count */ + atomic_t vx_tasks; /* tasks count */ + struct vx_info *vx_parent; /* parent context */ + int vx_state; /* context state */ + + unsigned long vx_nsmask[VX_SPACES]; /* assignment mask */ + struct nsproxy *vx_nsproxy[VX_SPACES]; /* private namespaces */ + struct fs_struct *vx_fs[VX_SPACES]; /* private namespace fs */ + + uint64_t vx_flags; /* context flags */ + uint64_t vx_ccaps; /* context caps (vserver) */ + kernel_cap_t vx_bcaps; /* bounding caps (system) */ + unsigned long vx_umask; /* unshare mask (guest) */ + + struct task_struct *vx_reaper; /* guest reaper process */ + pid_t vx_initpid; /* PID of guest init */ + int64_t vx_badness_bias; /* OOM points bias */ + + struct _vx_limit limit; /* vserver limits */ + struct _vx_sched sched; /* vserver scheduler */ + struct _vx_cvirt cvirt; /* virtual/bias stuff */ + struct _vx_cacct cacct; /* context accounting */ + + struct _vx_device dmap; /* default device map targets 
*/ + +#ifndef CONFIG_SMP + struct _vx_info_pc info_pc; /* per cpu data */ +#else + struct _vx_info_pc *ptr_pc; /* per cpu array */ +#endif + + wait_queue_head_t vx_wait; /* context exit waitqueue */ + int reboot_cmd; /* last sys_reboot() cmd */ + int exit_code; /* last process exit code */ + + char vx_name[65]; /* vserver name */ +}; + +#ifndef CONFIG_SMP +#define vx_ptr_pc(vxi) (&(vxi)->info_pc) +#define vx_per_cpu(vxi, v, id) vx_ptr_pc(vxi)->v +#else +#define vx_ptr_pc(vxi) ((vxi)->ptr_pc) +#define vx_per_cpu(vxi, v, id) per_cpu_ptr(vx_ptr_pc(vxi), id)->v +#endif + +#define vx_cpu(vxi, v) vx_per_cpu(vxi, v, smp_processor_id()) + + +struct vx_info_save { + struct vx_info *vxi; + xid_t xid; +}; + + +/* status flags */ + +#define VXS_HASHED 0x0001 +#define VXS_PAUSED 0x0010 +#define VXS_SHUTDOWN 0x0100 +#define VXS_HELPER 0x1000 +#define VXS_RELEASED 0x8000 + + +extern void claim_vx_info(struct vx_info *, struct task_struct *); +extern void release_vx_info(struct vx_info *, struct task_struct *); + +extern struct vx_info *lookup_vx_info(int); +extern struct vx_info *lookup_or_create_vx_info(int); + +extern int get_xid_list(int, unsigned int *, int); +extern int xid_is_hashed(xid_t); + +extern int vx_migrate_task(struct task_struct *, struct vx_info *, int); + +extern long vs_state_change(struct vx_info *, unsigned int); + + +#endif /* __KERNEL__ */ +#endif /* _VX_CONTEXT_H */ diff -Nur kernel-2.6.32.54/linux-2.6.32/include/linux/vserver/cvirt_cmd.h kernel-2.6.32.54.vs/linux-2.6.32/include/linux/vserver/cvirt_cmd.h --- kernel-2.6.32.54/linux-2.6.32/include/linux/vserver/cvirt_cmd.h 1970-01-01 01:00:00.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/include/linux/vserver/cvirt_cmd.h 2012-01-16 14:51:21.973408505 +0100 @@ -0,0 +1,53 @@ +#ifndef _VX_CVIRT_CMD_H +#define _VX_CVIRT_CMD_H + + +/* virtual host info name commands */ + +#define VCMD_set_vhi_name VC_CMD(VHOST, 1, 0) +#define VCMD_get_vhi_name VC_CMD(VHOST, 2, 0) + +struct vcmd_vhi_name_v0 { + uint32_t field; + char name[65]; +}; + + +enum vhi_name_field { + VHIN_CONTEXT = 0, + VHIN_SYSNAME, + VHIN_NODENAME, + VHIN_RELEASE, + VHIN_VERSION, + VHIN_MACHINE, + VHIN_DOMAINNAME, +}; + + +#ifdef __KERNEL__ + +#include + +extern int vc_set_vhi_name(struct vx_info *, void __user *); +extern int vc_get_vhi_name(struct vx_info *, void __user *); + +#endif /* __KERNEL__ */ + +#define VCMD_virt_stat VC_CMD(VSTAT, 3, 0) + +struct vcmd_virt_stat_v0 { + uint64_t offset; + uint64_t uptime; + uint32_t nr_threads; + uint32_t nr_running; + uint32_t nr_uninterruptible; + uint32_t nr_onhold; + uint32_t nr_forks; + uint32_t load[3]; +}; + +#ifdef __KERNEL__ +extern int vc_virt_stat(struct vx_info *, void __user *); + +#endif /* __KERNEL__ */ +#endif /* _VX_CVIRT_CMD_H */ diff -Nur kernel-2.6.32.54/linux-2.6.32/include/linux/vserver/cvirt_def.h kernel-2.6.32.54.vs/linux-2.6.32/include/linux/vserver/cvirt_def.h --- kernel-2.6.32.54/linux-2.6.32/include/linux/vserver/cvirt_def.h 1970-01-01 01:00:00.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/include/linux/vserver/cvirt_def.h 2012-01-16 14:51:21.973408505 +0100 @@ -0,0 +1,80 @@ +#ifndef _VX_CVIRT_DEF_H +#define _VX_CVIRT_DEF_H + +#include +#include +#include +#include +#include + + +struct _vx_usage_stat { + uint64_t user; + uint64_t nice; + uint64_t system; + uint64_t softirq; + uint64_t irq; + uint64_t idle; + uint64_t iowait; +}; + +struct _vx_syslog { + wait_queue_head_t log_wait; + spinlock_t logbuf_lock; /* lock for the log buffer */ + + unsigned long log_start; /* next char to be read by 
syslog() */ + unsigned long con_start; /* next char to be sent to consoles */ + unsigned long log_end; /* most-recently-written-char + 1 */ + unsigned long logged_chars; /* #chars since last read+clear operation */ + + char log_buf[1024]; +}; + + +/* context sub struct */ + +struct _vx_cvirt { + atomic_t nr_threads; /* number of current threads */ + atomic_t nr_running; /* number of running threads */ + atomic_t nr_uninterruptible; /* number of uninterruptible threads */ + + atomic_t nr_onhold; /* processes on hold */ + uint32_t onhold_last; /* jiffies when put on hold */ + + struct timeval bias_tv; /* time offset to the host */ + struct timespec bias_idle; + struct timespec bias_uptime; /* context creation point */ + uint64_t bias_clock; /* offset in clock_t */ + + spinlock_t load_lock; /* lock for the load averages */ + atomic_t load_updates; /* nr of load updates done so far */ + uint32_t load_last; /* last time load was calculated */ + uint32_t load[3]; /* load averages 1,5,15 */ + + atomic_t total_forks; /* number of forks so far */ + + struct _vx_syslog syslog; +}; + +struct _vx_cvirt_pc { + struct _vx_usage_stat cpustat; +}; + + +#ifdef CONFIG_VSERVER_DEBUG + +static inline void __dump_vx_cvirt(struct _vx_cvirt *cvirt) +{ + printk("\t_vx_cvirt:\n"); + printk("\t threads: %4d, %4d, %4d, %4d\n", + atomic_read(&cvirt->nr_threads), + atomic_read(&cvirt->nr_running), + atomic_read(&cvirt->nr_uninterruptible), + atomic_read(&cvirt->nr_onhold)); + /* add rest here */ + printk("\t total_forks = %d\n", atomic_read(&cvirt->total_forks)); +} + +#endif + +#endif /* _VX_CVIRT_DEF_H */ diff -Nur kernel-2.6.32.54/linux-2.6.32/include/linux/vserver/cvirt.h kernel-2.6.32.54.vs/linux-2.6.32/include/linux/vserver/cvirt.h --- kernel-2.6.32.54/linux-2.6.32/include/linux/vserver/cvirt.h 1970-01-01 01:00:00.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/include/linux/vserver/cvirt.h 2012-01-16 14:51:21.973408505 +0100 @@ -0,0 +1,20 @@ +#ifndef _VX_CVIRT_H +#define _VX_CVIRT_H + + +#ifdef __KERNEL__ + +struct timespec; + +void vx_vsi_uptime(struct timespec *, struct timespec *); + + +struct vx_info; + +void vx_update_load(struct vx_info *); + + +int vx_do_syslog(int, char __user *, int); + +#endif /* __KERNEL__ */ +#endif /* _VX_CVIRT_H */ diff -Nur kernel-2.6.32.54/linux-2.6.32/include/linux/vserver/debug_cmd.h kernel-2.6.32.54.vs/linux-2.6.32/include/linux/vserver/debug_cmd.h --- kernel-2.6.32.54/linux-2.6.32/include/linux/vserver/debug_cmd.h 1970-01-01 01:00:00.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/include/linux/vserver/debug_cmd.h 2012-01-16 14:51:21.973408505 +0100 @@ -0,0 +1,58 @@ +#ifndef _VX_DEBUG_CMD_H +#define _VX_DEBUG_CMD_H + + +/* debug commands */ + +#define VCMD_dump_history VC_CMD(DEBUG, 1, 0) + +#define VCMD_read_history VC_CMD(DEBUG, 5, 0) +#define VCMD_read_monitor VC_CMD(DEBUG, 6, 0) + +struct vcmd_read_history_v0 { + uint32_t index; + uint32_t count; + char __user *data; +}; + +struct vcmd_read_monitor_v0 { + uint32_t index; + uint32_t count; + char __user *data; +}; + + +#ifdef __KERNEL__ + +#ifdef CONFIG_COMPAT + +#include + +struct vcmd_read_history_v0_x32 { + uint32_t index; + uint32_t count; + compat_uptr_t data_ptr; +}; + +struct vcmd_read_monitor_v0_x32 { + uint32_t index; + uint32_t count; + compat_uptr_t data_ptr; +}; + +#endif /* CONFIG_COMPAT */ + +extern int vc_dump_history(uint32_t); + +extern int vc_read_history(uint32_t, void __user *); +extern int vc_read_monitor(uint32_t, void __user *); + +#ifdef CONFIG_COMPAT + +extern int 
vc_read_history_x32(uint32_t, void __user *); +extern int vc_read_monitor_x32(uint32_t, void __user *); + +#endif /* CONFIG_COMPAT */ + +#endif /* __KERNEL__ */ +#endif /* _VX_DEBUG_CMD_H */ diff -Nur kernel-2.6.32.54/linux-2.6.32/include/linux/vserver/debug.h kernel-2.6.32.54.vs/linux-2.6.32/include/linux/vserver/debug.h --- kernel-2.6.32.54/linux-2.6.32/include/linux/vserver/debug.h 1970-01-01 01:00:00.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/include/linux/vserver/debug.h 2012-01-16 14:51:21.973408505 +0100 @@ -0,0 +1,127 @@ +#ifndef _VX_DEBUG_H +#define _VX_DEBUG_H + + +#define VXD_CBIT(n, m) (vx_debug_ ## n & (1 << (m))) +#define VXD_CMIN(n, m) (vx_debug_ ## n > (m)) +#define VXD_MASK(n, m) (vx_debug_ ## n & (m)) + +#define VXD_DEV(d) (d), (d)->bd_inode->i_ino, \ + imajor((d)->bd_inode), iminor((d)->bd_inode) +#define VXF_DEV "%p[%lu,%d:%d]" + + +#define vxd_path(p) \ + ({ static char _buffer[PATH_MAX]; \ + d_path(p, _buffer, sizeof(_buffer)); }) + +#define vxd_cond_path(n) \ + ((n) ? vxd_path(&(n)->path) : "" ) + + +#ifdef CONFIG_VSERVER_DEBUG + +extern unsigned int vx_debug_switch; +extern unsigned int vx_debug_xid; +extern unsigned int vx_debug_nid; +extern unsigned int vx_debug_tag; +extern unsigned int vx_debug_net; +extern unsigned int vx_debug_limit; +extern unsigned int vx_debug_cres; +extern unsigned int vx_debug_dlim; +extern unsigned int vx_debug_quota; +extern unsigned int vx_debug_cvirt; +extern unsigned int vx_debug_space; +extern unsigned int vx_debug_misc; + + +#define VX_LOGLEVEL "vxD: " +#define VX_PROC_FMT "%p: " +#define VX_PROCESS current + +#define vxdprintk(c, f, x...) \ + do { \ + if (c) \ + printk(VX_LOGLEVEL VX_PROC_FMT f "\n", \ + VX_PROCESS , ##x); \ + } while (0) + +#define vxlprintk(c, f, x...) \ + do { \ + if (c) \ + printk(VX_LOGLEVEL f " @%s:%d\n", x); \ + } while (0) + +#define vxfprintk(c, f, x...) \ + do { \ + if (c) \ + printk(VX_LOGLEVEL f " %s@%s:%d\n", x); \ + } while (0) + + +struct vx_info; + +void dump_vx_info(struct vx_info *, int); +void dump_vx_info_inactive(int); + +#else /* CONFIG_VSERVER_DEBUG */ + +#define vx_debug_switch 0 +#define vx_debug_xid 0 +#define vx_debug_nid 0 +#define vx_debug_tag 0 +#define vx_debug_net 0 +#define vx_debug_limit 0 +#define vx_debug_cres 0 +#define vx_debug_dlim 0 +#define vx_debug_cvirt 0 + +#define vxdprintk(x...) do { } while (0) +#define vxlprintk(x...) do { } while (0) +#define vxfprintk(x...) do { } while (0) + +#endif /* CONFIG_VSERVER_DEBUG */ + + +#ifdef CONFIG_VSERVER_WARN + +#define VX_WARNLEVEL KERN_WARNING "vxW: " +#define VX_WARN_TASK "[»%s«,%u:#%u|%u|%u] " +#define VX_WARN_XID "[xid #%u] " +#define VX_WARN_NID "[nid #%u] " +#define VX_WARN_TAG "[tag #%u] " + +#define vxwprintk(c, f, x...) \ + do { \ + if (c) \ + printk(VX_WARNLEVEL f "\n", ##x); \ + } while (0) + +#else /* CONFIG_VSERVER_WARN */ + +#define vxwprintk(x...) do { } while (0) + +#endif /* CONFIG_VSERVER_WARN */ + +#define vxwprintk_task(c, f, x...) \ + vxwprintk(c, VX_WARN_TASK f, \ + current->comm, current->pid, \ + current->xid, current->nid, current->tag, ##x) +#define vxwprintk_xid(c, f, x...) \ + vxwprintk(c, VX_WARN_XID f, current->xid, x) +#define vxwprintk_nid(c, f, x...) \ + vxwprintk(c, VX_WARN_NID f, current->nid, x) +#define vxwprintk_tag(c, f, x...) \ + vxwprintk(c, VX_WARN_TAG f, current->tag, x) + +#ifdef CONFIG_VSERVER_DEBUG +#define vxd_assert_lock(l) assert_spin_locked(l) +#define vxd_assert(c, f, x...) 
vxlprintk(!(c), \ + "assertion [" f "] failed.", ##x, __FILE__, __LINE__) +#else +#define vxd_assert_lock(l) do { } while (0) +#define vxd_assert(c, f, x...) do { } while (0) +#endif + + +#endif /* _VX_DEBUG_H */ diff -Nur kernel-2.6.32.54/linux-2.6.32/include/linux/vserver/device_cmd.h kernel-2.6.32.54.vs/linux-2.6.32/include/linux/vserver/device_cmd.h --- kernel-2.6.32.54/linux-2.6.32/include/linux/vserver/device_cmd.h 1970-01-01 01:00:00.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/include/linux/vserver/device_cmd.h 2012-01-16 14:51:21.973408505 +0100 @@ -0,0 +1,44 @@ +#ifndef _VX_DEVICE_CMD_H +#define _VX_DEVICE_CMD_H + + +/* device vserver commands */ + +#define VCMD_set_mapping VC_CMD(DEVICE, 1, 0) +#define VCMD_unset_mapping VC_CMD(DEVICE, 2, 0) + +struct vcmd_set_mapping_v0 { + const char __user *device; + const char __user *target; + uint32_t flags; +}; + + +#ifdef __KERNEL__ + +#ifdef CONFIG_COMPAT + +#include + +struct vcmd_set_mapping_v0_x32 { + compat_uptr_t device_ptr; + compat_uptr_t target_ptr; + uint32_t flags; +}; + +#endif /* CONFIG_COMPAT */ + +#include + +extern int vc_set_mapping(struct vx_info *, void __user *); +extern int vc_unset_mapping(struct vx_info *, void __user *); + +#ifdef CONFIG_COMPAT + +extern int vc_set_mapping_x32(struct vx_info *, void __user *); +extern int vc_unset_mapping_x32(struct vx_info *, void __user *); + +#endif /* CONFIG_COMPAT */ + +#endif /* __KERNEL__ */ +#endif /* _VX_DEVICE_CMD_H */ diff -Nur kernel-2.6.32.54/linux-2.6.32/include/linux/vserver/device_def.h kernel-2.6.32.54.vs/linux-2.6.32/include/linux/vserver/device_def.h --- kernel-2.6.32.54/linux-2.6.32/include/linux/vserver/device_def.h 1970-01-01 01:00:00.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/include/linux/vserver/device_def.h 2012-01-16 14:51:21.973408505 +0100 @@ -0,0 +1,17 @@ +#ifndef _VX_DEVICE_DEF_H +#define _VX_DEVICE_DEF_H + +#include + +struct vx_dmap_target { + dev_t target; + uint32_t flags; +}; + +struct _vx_device { +#ifdef CONFIG_VSERVER_DEVICE + struct vx_dmap_target targets[2]; +#endif +}; + +#endif /* _VX_DEVICE_DEF_H */ diff -Nur kernel-2.6.32.54/linux-2.6.32/include/linux/vserver/device.h kernel-2.6.32.54.vs/linux-2.6.32/include/linux/vserver/device.h --- kernel-2.6.32.54/linux-2.6.32/include/linux/vserver/device.h 1970-01-01 01:00:00.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/include/linux/vserver/device.h 2012-01-16 14:51:21.973408505 +0100 @@ -0,0 +1,15 @@ +#ifndef _VX_DEVICE_H +#define _VX_DEVICE_H + + +#define DATTR_CREATE 0x00000001 +#define DATTR_OPEN 0x00000002 + +#define DATTR_REMAP 0x00000010 + +#define DATTR_MASK 0x00000013 + + +#else /* _VX_DEVICE_H */ +#warning duplicate inclusion +#endif /* _VX_DEVICE_H */ diff -Nur kernel-2.6.32.54/linux-2.6.32/include/linux/vserver/dlimit_cmd.h kernel-2.6.32.54.vs/linux-2.6.32/include/linux/vserver/dlimit_cmd.h --- kernel-2.6.32.54/linux-2.6.32/include/linux/vserver/dlimit_cmd.h 1970-01-01 01:00:00.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/include/linux/vserver/dlimit_cmd.h 2012-01-16 14:51:21.973408505 +0100 @@ -0,0 +1,109 @@ +#ifndef _VX_DLIMIT_CMD_H +#define _VX_DLIMIT_CMD_H + + +/* dlimit vserver commands */ + +#define VCMD_add_dlimit VC_CMD(DLIMIT, 1, 0) +#define VCMD_rem_dlimit VC_CMD(DLIMIT, 2, 0) + +#define VCMD_set_dlimit VC_CMD(DLIMIT, 5, 0) +#define VCMD_get_dlimit VC_CMD(DLIMIT, 6, 0) + +struct vcmd_ctx_dlimit_base_v0 { + const char __user *name; + uint32_t flags; +}; + +struct vcmd_ctx_dlimit_v0 { + const char __user *name; + uint32_t space_used; /* used 
space in kbytes */ + uint32_t space_total; /* maximum space in kbytes */ + uint32_t inodes_used; /* used inodes */ + uint32_t inodes_total; /* maximum inodes */ + uint32_t reserved; /* reserved for root in % */ + uint32_t flags; +}; + +#define CDLIM_UNSET ((uint32_t)0UL) +#define CDLIM_INFINITY ((uint32_t)~0UL) +#define CDLIM_KEEP ((uint32_t)~1UL) + +#define DLIME_UNIT 0 +#define DLIME_KILO 1 +#define DLIME_MEGA 2 +#define DLIME_GIGA 3 + +#define DLIMF_SHIFT 0x10 + +#define DLIMS_USED 0 +#define DLIMS_TOTAL 2 + +static inline +uint64_t dlimit_space_32to64(uint32_t val, uint32_t flags, int shift) +{ + int exp = (flags & DLIMF_SHIFT) ? + (flags >> shift) & DLIME_GIGA : DLIME_KILO; + return ((uint64_t)val) << (10 * exp); +} + +static inline +uint32_t dlimit_space_64to32(uint64_t val, uint32_t *flags, int shift) +{ + int exp = 0; + + if (*flags & DLIMF_SHIFT) { + while (val > (1LL << 32) && (exp < 3)) { + val >>= 10; + exp++; + } + *flags &= ~(DLIME_GIGA << shift); + *flags |= exp << shift; + } else + val >>= 10; + return val; +} + +#ifdef __KERNEL__ + +#ifdef CONFIG_COMPAT + +#include + +struct vcmd_ctx_dlimit_base_v0_x32 { + compat_uptr_t name_ptr; + uint32_t flags; +}; + +struct vcmd_ctx_dlimit_v0_x32 { + compat_uptr_t name_ptr; + uint32_t space_used; /* used space in kbytes */ + uint32_t space_total; /* maximum space in kbytes */ + uint32_t inodes_used; /* used inodes */ + uint32_t inodes_total; /* maximum inodes */ + uint32_t reserved; /* reserved for root in % */ + uint32_t flags; +}; + +#endif /* CONFIG_COMPAT */ + +#include + +extern int vc_add_dlimit(uint32_t, void __user *); +extern int vc_rem_dlimit(uint32_t, void __user *); + +extern int vc_set_dlimit(uint32_t, void __user *); +extern int vc_get_dlimit(uint32_t, void __user *); + +#ifdef CONFIG_COMPAT + +extern int vc_add_dlimit_x32(uint32_t, void __user *); +extern int vc_rem_dlimit_x32(uint32_t, void __user *); + +extern int vc_set_dlimit_x32(uint32_t, void __user *); +extern int vc_get_dlimit_x32(uint32_t, void __user *); + +#endif /* CONFIG_COMPAT */ + +#endif /* __KERNEL__ */ +#endif /* _VX_DLIMIT_CMD_H */ diff -Nur kernel-2.6.32.54/linux-2.6.32/include/linux/vserver/dlimit.h kernel-2.6.32.54.vs/linux-2.6.32/include/linux/vserver/dlimit.h --- kernel-2.6.32.54/linux-2.6.32/include/linux/vserver/dlimit.h 1970-01-01 01:00:00.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/include/linux/vserver/dlimit.h 2012-01-16 14:51:21.973408505 +0100 @@ -0,0 +1,54 @@ +#ifndef _VX_DLIMIT_H +#define _VX_DLIMIT_H + +#include "switch.h" + + +#ifdef __KERNEL__ + +/* keep in sync with CDLIM_INFINITY */ + +#define DLIM_INFINITY (~0ULL) + +#include +#include + +struct super_block; + +struct dl_info { + struct hlist_node dl_hlist; /* linked list of contexts */ + struct rcu_head dl_rcu; /* the rcu head */ + tag_t dl_tag; /* context tag */ + atomic_t dl_usecnt; /* usage count */ + atomic_t dl_refcnt; /* reference count */ + + struct super_block *dl_sb; /* associated superblock */ + + spinlock_t dl_lock; /* protect the values */ + + unsigned long long dl_space_used; /* used space in bytes */ + unsigned long long dl_space_total; /* maximum space in bytes */ + unsigned long dl_inodes_used; /* used inodes */ + unsigned long dl_inodes_total; /* maximum inodes */ + + unsigned int dl_nrlmult; /* non root limit mult */ +}; + +struct rcu_head; + +extern void rcu_free_dl_info(struct rcu_head *); +extern void unhash_dl_info(struct dl_info *); + +extern struct dl_info *locate_dl_info(struct super_block *, tag_t); + + +struct kstatfs; + +extern void 
vx_vsi_statfs(struct super_block *, struct kstatfs *); + +typedef uint64_t dlsize_t; + +#endif /* __KERNEL__ */ +#else /* _VX_DLIMIT_H */ +#warning duplicate inclusion +#endif /* _VX_DLIMIT_H */ diff -Nur kernel-2.6.32.54/linux-2.6.32/include/linux/vserver/global.h kernel-2.6.32.54.vs/linux-2.6.32/include/linux/vserver/global.h --- kernel-2.6.32.54/linux-2.6.32/include/linux/vserver/global.h 1970-01-01 01:00:00.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/include/linux/vserver/global.h 2012-01-16 14:51:21.981408477 +0100 @@ -0,0 +1,19 @@ +#ifndef _VX_GLOBAL_H +#define _VX_GLOBAL_H + + +extern atomic_t vx_global_ctotal; +extern atomic_t vx_global_cactive; + +extern atomic_t nx_global_ctotal; +extern atomic_t nx_global_cactive; + +extern atomic_t vs_global_nsproxy; +extern atomic_t vs_global_fs; +extern atomic_t vs_global_mnt_ns; +extern atomic_t vs_global_uts_ns; +extern atomic_t vs_global_user_ns; +extern atomic_t vs_global_pid_ns; + + +#endif /* _VX_GLOBAL_H */ diff -Nur kernel-2.6.32.54/linux-2.6.32/include/linux/vserver/history.h kernel-2.6.32.54.vs/linux-2.6.32/include/linux/vserver/history.h --- kernel-2.6.32.54/linux-2.6.32/include/linux/vserver/history.h 1970-01-01 01:00:00.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/include/linux/vserver/history.h 2012-01-16 14:51:21.981408477 +0100 @@ -0,0 +1,197 @@ +#ifndef _VX_HISTORY_H +#define _VX_HISTORY_H + + +enum { + VXH_UNUSED = 0, + VXH_THROW_OOPS = 1, + + VXH_GET_VX_INFO, + VXH_PUT_VX_INFO, + VXH_INIT_VX_INFO, + VXH_SET_VX_INFO, + VXH_CLR_VX_INFO, + VXH_CLAIM_VX_INFO, + VXH_RELEASE_VX_INFO, + VXH_ALLOC_VX_INFO, + VXH_DEALLOC_VX_INFO, + VXH_HASH_VX_INFO, + VXH_UNHASH_VX_INFO, + VXH_LOC_VX_INFO, + VXH_LOOKUP_VX_INFO, + VXH_CREATE_VX_INFO, +}; + +struct _vxhe_vxi { + struct vx_info *ptr; + unsigned xid; + unsigned usecnt; + unsigned tasks; +}; + +struct _vxhe_set_clr { + void *data; +}; + +struct _vxhe_loc_lookup { + unsigned arg; +}; + +struct _vx_hist_entry { + void *loc; + unsigned short seq; + unsigned short type; + struct _vxhe_vxi vxi; + union { + struct _vxhe_set_clr sc; + struct _vxhe_loc_lookup ll; + }; +}; + +#ifdef CONFIG_VSERVER_HISTORY + +extern unsigned volatile int vxh_active; + +struct _vx_hist_entry *vxh_advance(void *loc); + + +static inline +void __vxh_copy_vxi(struct _vx_hist_entry *entry, struct vx_info *vxi) +{ + entry->vxi.ptr = vxi; + if (vxi) { + entry->vxi.usecnt = atomic_read(&vxi->vx_usecnt); + entry->vxi.tasks = atomic_read(&vxi->vx_tasks); + entry->vxi.xid = vxi->vx_id; + } +} + + +#define __HERE__ current_text_addr() + +#define __VXH_BODY(__type, __data, __here) \ + struct _vx_hist_entry *entry; \ + \ + preempt_disable(); \ + entry = vxh_advance(__here); \ + __data; \ + entry->type = __type; \ + preempt_enable(); + + + /* pass vxi only */ + +#define __VXH_SMPL \ + __vxh_copy_vxi(entry, vxi) + +static inline +void __vxh_smpl(struct vx_info *vxi, int __type, void *__here) +{ + __VXH_BODY(__type, __VXH_SMPL, __here) +} + + /* pass vxi and data (void *) */ + +#define __VXH_DATA \ + __vxh_copy_vxi(entry, vxi); \ + entry->sc.data = data + +static inline +void __vxh_data(struct vx_info *vxi, void *data, + int __type, void *__here) +{ + __VXH_BODY(__type, __VXH_DATA, __here) +} + + /* pass vxi and arg (long) */ + +#define __VXH_LONG \ + __vxh_copy_vxi(entry, vxi); \ + entry->ll.arg = arg + +static inline +void __vxh_long(struct vx_info *vxi, long arg, + int __type, void *__here) +{ + __VXH_BODY(__type, __VXH_LONG, __here) +} + + +static inline +void __vxh_throw_oops(void *__here) +{ + 
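	/* record one final entry at the oops location, then stop the
	 * ring so the trail leading up to the oops stays intact for a
	 * later vxh_dump_history() */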
__VXH_BODY(VXH_THROW_OOPS, {}, __here); + /* prevent further acquisition */ + vxh_active = 0; +} + + +#define vxh_throw_oops() __vxh_throw_oops(__HERE__); + +#define __vxh_get_vx_info(v, h) __vxh_smpl(v, VXH_GET_VX_INFO, h); +#define __vxh_put_vx_info(v, h) __vxh_smpl(v, VXH_PUT_VX_INFO, h); + +#define __vxh_init_vx_info(v, d, h) \ + __vxh_data(v, d, VXH_INIT_VX_INFO, h); +#define __vxh_set_vx_info(v, d, h) \ + __vxh_data(v, d, VXH_SET_VX_INFO, h); +#define __vxh_clr_vx_info(v, d, h) \ + __vxh_data(v, d, VXH_CLR_VX_INFO, h); + +#define __vxh_claim_vx_info(v, d, h) \ + __vxh_data(v, d, VXH_CLAIM_VX_INFO, h); +#define __vxh_release_vx_info(v, d, h) \ + __vxh_data(v, d, VXH_RELEASE_VX_INFO, h); + +#define vxh_alloc_vx_info(v) \ + __vxh_smpl(v, VXH_ALLOC_VX_INFO, __HERE__); +#define vxh_dealloc_vx_info(v) \ + __vxh_smpl(v, VXH_DEALLOC_VX_INFO, __HERE__); + +#define vxh_hash_vx_info(v) \ + __vxh_smpl(v, VXH_HASH_VX_INFO, __HERE__); +#define vxh_unhash_vx_info(v) \ + __vxh_smpl(v, VXH_UNHASH_VX_INFO, __HERE__); + +#define vxh_loc_vx_info(v, l) \ + __vxh_long(v, l, VXH_LOC_VX_INFO, __HERE__); +#define vxh_lookup_vx_info(v, l) \ + __vxh_long(v, l, VXH_LOOKUP_VX_INFO, __HERE__); +#define vxh_create_vx_info(v, l) \ + __vxh_long(v, l, VXH_CREATE_VX_INFO, __HERE__); + +extern void vxh_dump_history(void); + + +#else /* CONFIG_VSERVER_HISTORY */ + +#define __HERE__ 0 + +#define vxh_throw_oops() do { } while (0) + +#define __vxh_get_vx_info(v, h) do { } while (0) +#define __vxh_put_vx_info(v, h) do { } while (0) + +#define __vxh_init_vx_info(v, d, h) do { } while (0) +#define __vxh_set_vx_info(v, d, h) do { } while (0) +#define __vxh_clr_vx_info(v, d, h) do { } while (0) + +#define __vxh_claim_vx_info(v, d, h) do { } while (0) +#define __vxh_release_vx_info(v, d, h) do { } while (0) + +#define vxh_alloc_vx_info(v) do { } while (0) +#define vxh_dealloc_vx_info(v) do { } while (0) + +#define vxh_hash_vx_info(v) do { } while (0) +#define vxh_unhash_vx_info(v) do { } while (0) + +#define vxh_loc_vx_info(v, l) do { } while (0) +#define vxh_lookup_vx_info(v, l) do { } while (0) +#define vxh_create_vx_info(v, l) do { } while (0) + +#define vxh_dump_history() do { } while (0) + + +#endif /* CONFIG_VSERVER_HISTORY */ + +#endif /* _VX_HISTORY_H */ diff -Nur kernel-2.6.32.54/linux-2.6.32/include/linux/vserver/inode_cmd.h kernel-2.6.32.54.vs/linux-2.6.32/include/linux/vserver/inode_cmd.h --- kernel-2.6.32.54/linux-2.6.32/include/linux/vserver/inode_cmd.h 1970-01-01 01:00:00.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/include/linux/vserver/inode_cmd.h 2012-01-16 14:51:21.981408477 +0100 @@ -0,0 +1,59 @@ +#ifndef _VX_INODE_CMD_H +#define _VX_INODE_CMD_H + + +/* inode vserver commands */ + +#define VCMD_get_iattr VC_CMD(INODE, 1, 1) +#define VCMD_set_iattr VC_CMD(INODE, 2, 1) + +#define VCMD_fget_iattr VC_CMD(INODE, 3, 0) +#define VCMD_fset_iattr VC_CMD(INODE, 4, 0) + +struct vcmd_ctx_iattr_v1 { + const char __user *name; + uint32_t tag; + uint32_t flags; + uint32_t mask; +}; + +struct vcmd_ctx_fiattr_v0 { + uint32_t tag; + uint32_t flags; + uint32_t mask; +}; + + +#ifdef __KERNEL__ + + +#ifdef CONFIG_COMPAT + +#include + +struct vcmd_ctx_iattr_v1_x32 { + compat_uptr_t name_ptr; + uint32_t tag; + uint32_t flags; + uint32_t mask; +}; + +#endif /* CONFIG_COMPAT */ + +#include + +extern int vc_get_iattr(void __user *); +extern int vc_set_iattr(void __user *); + +extern int vc_fget_iattr(uint32_t, void __user *); +extern int vc_fset_iattr(uint32_t, void __user *); + +#ifdef CONFIG_COMPAT + +extern int 
vc_get_iattr_x32(void __user *); +extern int vc_set_iattr_x32(void __user *); + +#endif /* CONFIG_COMPAT */ + +#endif /* __KERNEL__ */ +#endif /* _VX_INODE_CMD_H */ diff -Nur kernel-2.6.32.54/linux-2.6.32/include/linux/vserver/inode.h kernel-2.6.32.54.vs/linux-2.6.32/include/linux/vserver/inode.h --- kernel-2.6.32.54/linux-2.6.32/include/linux/vserver/inode.h 1970-01-01 01:00:00.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/include/linux/vserver/inode.h 2012-01-16 14:51:21.981408477 +0100 @@ -0,0 +1,39 @@ +#ifndef _VX_INODE_H +#define _VX_INODE_H + + +#define IATTR_TAG 0x01000000 + +#define IATTR_ADMIN 0x00000001 +#define IATTR_WATCH 0x00000002 +#define IATTR_HIDE 0x00000004 +#define IATTR_FLAGS 0x00000007 + +#define IATTR_BARRIER 0x00010000 +#define IATTR_IXUNLINK 0x00020000 +#define IATTR_IMMUTABLE 0x00040000 +#define IATTR_COW 0x00080000 + +#ifdef __KERNEL__ + + +#ifdef CONFIG_VSERVER_PROC_SECURE +#define IATTR_PROC_DEFAULT ( IATTR_ADMIN | IATTR_HIDE ) +#define IATTR_PROC_SYMLINK ( IATTR_ADMIN ) +#else +#define IATTR_PROC_DEFAULT ( IATTR_ADMIN ) +#define IATTR_PROC_SYMLINK ( IATTR_ADMIN ) +#endif + +#define vx_hide_check(c, m) (((m) & IATTR_HIDE) ? vx_check(c, m) : 1) + +#endif /* __KERNEL__ */ + +/* inode ioctls */ + +#define FIOC_GETXFLG _IOR('x', 5, long) +#define FIOC_SETXFLG _IOW('x', 6, long) + +#else /* _VX_INODE_H */ +#warning duplicate inclusion +#endif /* _VX_INODE_H */ diff -Nur kernel-2.6.32.54/linux-2.6.32/include/linux/vserver/Kbuild kernel-2.6.32.54.vs/linux-2.6.32/include/linux/vserver/Kbuild --- kernel-2.6.32.54/linux-2.6.32/include/linux/vserver/Kbuild 1970-01-01 01:00:00.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/include/linux/vserver/Kbuild 2012-01-16 14:51:21.969408519 +0100 @@ -0,0 +1,8 @@ + +unifdef-y += context_cmd.h network_cmd.h space_cmd.h \ + cacct_cmd.h cvirt_cmd.h limit_cmd.h dlimit_cmd.h \ + inode_cmd.h tag_cmd.h sched_cmd.h signal_cmd.h \ + debug_cmd.h device_cmd.h + +unifdef-y += switch.h network.h monitor.h inode.h device.h + diff -Nur kernel-2.6.32.54/linux-2.6.32/include/linux/vserver/limit_cmd.h kernel-2.6.32.54.vs/linux-2.6.32/include/linux/vserver/limit_cmd.h --- kernel-2.6.32.54/linux-2.6.32/include/linux/vserver/limit_cmd.h 1970-01-01 01:00:00.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/include/linux/vserver/limit_cmd.h 2012-01-16 14:51:21.981408477 +0100 @@ -0,0 +1,71 @@ +#ifndef _VX_LIMIT_CMD_H +#define _VX_LIMIT_CMD_H + + +/* rlimit vserver commands */ + +#define VCMD_get_rlimit VC_CMD(RLIMIT, 1, 0) +#define VCMD_set_rlimit VC_CMD(RLIMIT, 2, 0) +#define VCMD_get_rlimit_mask VC_CMD(RLIMIT, 3, 0) +#define VCMD_reset_hits VC_CMD(RLIMIT, 7, 0) +#define VCMD_reset_minmax VC_CMD(RLIMIT, 9, 0) + +struct vcmd_ctx_rlimit_v0 { + uint32_t id; + uint64_t minimum; + uint64_t softlimit; + uint64_t maximum; +}; + +struct vcmd_ctx_rlimit_mask_v0 { + uint32_t minimum; + uint32_t softlimit; + uint32_t maximum; +}; + +#define VCMD_rlimit_stat VC_CMD(VSTAT, 1, 0) + +struct vcmd_rlimit_stat_v0 { + uint32_t id; + uint32_t hits; + uint64_t value; + uint64_t minimum; + uint64_t maximum; +}; + +#define CRLIM_UNSET (0ULL) +#define CRLIM_INFINITY (~0ULL) +#define CRLIM_KEEP (~1ULL) + +#ifdef __KERNEL__ + +#ifdef CONFIG_IA32_EMULATION + +struct vcmd_ctx_rlimit_v0_x32 { + uint32_t id; + uint64_t minimum; + uint64_t softlimit; + uint64_t maximum; +} __attribute__ ((packed)); + +#endif /* CONFIG_IA32_EMULATION */ + +#include + +extern int vc_get_rlimit_mask(uint32_t, void __user *); +extern int vc_get_rlimit(struct vx_info *, void __user *); 
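/*
 * vc_get_rlimit()/vc_set_rlimit() exchange a struct vcmd_ctx_rlimit_v0
 * with userspace: 'id' selects the resource (an RLIMIT_* or VLIMIT_*
 * index), minimum/softlimit/maximum carry the values, and CRLIM_UNSET,
 * CRLIM_INFINITY and CRLIM_KEEP are the special encodings defined
 * above (CRLIM_KEEP presumably leaving the current value unchanged
 * on set).
 */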
+extern int vc_set_rlimit(struct vx_info *, void __user *); +extern int vc_reset_hits(struct vx_info *, void __user *); +extern int vc_reset_minmax(struct vx_info *, void __user *); + +extern int vc_rlimit_stat(struct vx_info *, void __user *); + +#ifdef CONFIG_IA32_EMULATION + +extern int vc_get_rlimit_x32(struct vx_info *, void __user *); +extern int vc_set_rlimit_x32(struct vx_info *, void __user *); + +#endif /* CONFIG_IA32_EMULATION */ + +#endif /* __KERNEL__ */ +#endif /* _VX_LIMIT_CMD_H */ diff -Nur kernel-2.6.32.54/linux-2.6.32/include/linux/vserver/limit_def.h kernel-2.6.32.54.vs/linux-2.6.32/include/linux/vserver/limit_def.h --- kernel-2.6.32.54/linux-2.6.32/include/linux/vserver/limit_def.h 1970-01-01 01:00:00.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/include/linux/vserver/limit_def.h 2012-01-16 14:51:21.981408477 +0100 @@ -0,0 +1,47 @@ +#ifndef _VX_LIMIT_DEF_H +#define _VX_LIMIT_DEF_H + +#include +#include + +#include "limit.h" + + +struct _vx_res_limit { + rlim_t soft; /* Context soft limit */ + rlim_t hard; /* Context hard limit */ + + rlim_atomic_t rcur; /* Current value */ + rlim_t rmin; /* Context minimum */ + rlim_t rmax; /* Context maximum */ + + atomic_t lhit; /* Limit hits */ +}; + +/* context sub struct */ + +struct _vx_limit { + struct _vx_res_limit res[NUM_LIMITS]; +}; + +#ifdef CONFIG_VSERVER_DEBUG + +static inline void __dump_vx_limit(struct _vx_limit *limit) +{ + int i; + + printk("\t_vx_limit:"); + for (i = 0; i < NUM_LIMITS; i++) { + printk("\t [%2d] = %8lu %8lu/%8lu, %8ld/%8ld, %8d\n", + i, (unsigned long)__rlim_get(limit, i), + (unsigned long)__rlim_rmin(limit, i), + (unsigned long)__rlim_rmax(limit, i), + (long)__rlim_soft(limit, i), + (long)__rlim_hard(limit, i), + atomic_read(&__rlim_lhit(limit, i))); + } +} + +#endif + +#endif /* _VX_LIMIT_DEF_H */ diff -Nur kernel-2.6.32.54/linux-2.6.32/include/linux/vserver/limit.h kernel-2.6.32.54.vs/linux-2.6.32/include/linux/vserver/limit.h --- kernel-2.6.32.54/linux-2.6.32/include/linux/vserver/limit.h 1970-01-01 01:00:00.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/include/linux/vserver/limit.h 2012-01-16 14:51:21.981408477 +0100 @@ -0,0 +1,71 @@ +#ifndef _VX_LIMIT_H +#define _VX_LIMIT_H + +#define VLIMIT_NSOCK 16 +#define VLIMIT_OPENFD 17 +#define VLIMIT_ANON 18 +#define VLIMIT_SHMEM 19 +#define VLIMIT_SEMARY 20 +#define VLIMIT_NSEMS 21 +#define VLIMIT_DENTRY 22 +#define VLIMIT_MAPPED 23 + + +#ifdef __KERNEL__ + +#define VLIM_NOCHECK ((1L << VLIMIT_DENTRY) | (1L << RLIMIT_RSS)) + +/* keep in sync with CRLIM_INFINITY */ + +#define VLIM_INFINITY (~0ULL) + +#include +#include + +#ifndef RLIM_INFINITY +#warning RLIM_INFINITY is undefined +#endif + +#define __rlim_val(l, r, v) ((l)->res[r].v) + +#define __rlim_soft(l, r) __rlim_val(l, r, soft) +#define __rlim_hard(l, r) __rlim_val(l, r, hard) + +#define __rlim_rcur(l, r) __rlim_val(l, r, rcur) +#define __rlim_rmin(l, r) __rlim_val(l, r, rmin) +#define __rlim_rmax(l, r) __rlim_val(l, r, rmax) + +#define __rlim_lhit(l, r) __rlim_val(l, r, lhit) +#define __rlim_hit(l, r) atomic_inc(&__rlim_lhit(l, r)) + +typedef atomic_long_t rlim_atomic_t; +typedef unsigned long rlim_t; + +#define __rlim_get(l, r) atomic_long_read(&__rlim_rcur(l, r)) +#define __rlim_set(l, r, v) atomic_long_set(&__rlim_rcur(l, r), v) +#define __rlim_inc(l, r) atomic_long_inc(&__rlim_rcur(l, r)) +#define __rlim_dec(l, r) atomic_long_dec(&__rlim_rcur(l, r)) +#define __rlim_add(l, r, v) atomic_long_add(v, &__rlim_rcur(l, r)) +#define __rlim_sub(l, r, v) atomic_long_sub(v, &__rlim_rcur(l, 
r)) + + +#if (RLIM_INFINITY == VLIM_INFINITY) +#define VX_VLIM(r) ((long long)(long)(r)) +#define VX_RLIM(v) ((rlim_t)(v)) +#else +#define VX_VLIM(r) (((r) == RLIM_INFINITY) \ + ? VLIM_INFINITY : (long long)(r)) +#define VX_RLIM(v) (((v) == VLIM_INFINITY) \ + ? RLIM_INFINITY : (rlim_t)(v)) +#endif + +struct sysinfo; + +void vx_vsi_meminfo(struct sysinfo *); +void vx_vsi_swapinfo(struct sysinfo *); +long vx_vsi_cached(struct sysinfo *); + +#define NUM_LIMITS 24 + +#endif /* __KERNEL__ */ +#endif /* _VX_LIMIT_H */ diff -Nur kernel-2.6.32.54/linux-2.6.32/include/linux/vserver/limit_int.h kernel-2.6.32.54.vs/linux-2.6.32/include/linux/vserver/limit_int.h --- kernel-2.6.32.54/linux-2.6.32/include/linux/vserver/limit_int.h 1970-01-01 01:00:00.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/include/linux/vserver/limit_int.h 2012-01-16 14:51:21.985408463 +0100 @@ -0,0 +1,198 @@ +#ifndef _VX_LIMIT_INT_H +#define _VX_LIMIT_INT_H + +#include "context.h" + +#ifdef __KERNEL__ + +#define VXD_RCRES_COND(r) VXD_CBIT(cres, r) +#define VXD_RLIMIT_COND(r) VXD_CBIT(limit, r) + +extern const char *vlimit_name[NUM_LIMITS]; + +static inline void __vx_acc_cres(struct vx_info *vxi, + int res, int dir, void *_data, char *_file, int _line) +{ + if (VXD_RCRES_COND(res)) + vxlprintk(1, "vx_acc_cres[%5d,%s,%2d]: %5ld%s (%p)", + (vxi ? vxi->vx_id : -1), vlimit_name[res], res, + (vxi ? (long)__rlim_get(&vxi->limit, res) : 0), + (dir > 0) ? "++" : "--", _data, _file, _line); + if (!vxi) + return; + + if (dir > 0) + __rlim_inc(&vxi->limit, res); + else + __rlim_dec(&vxi->limit, res); +} + +static inline void __vx_add_cres(struct vx_info *vxi, + int res, int amount, void *_data, char *_file, int _line) +{ + if (VXD_RCRES_COND(res)) + vxlprintk(1, "vx_add_cres[%5d,%s,%2d]: %5ld += %5d (%p)", + (vxi ? vxi->vx_id : -1), vlimit_name[res], res, + (vxi ? (long)__rlim_get(&vxi->limit, res) : 0), + amount, _data, _file, _line); + if (amount == 0) + return; + if (!vxi) + return; + __rlim_add(&vxi->limit, res, amount); +} + +static inline +int __vx_cres_adjust_max(struct _vx_limit *limit, int res, rlim_t value) +{ + int cond = (value > __rlim_rmax(limit, res)); + + if (cond) + __rlim_rmax(limit, res) = value; + return cond; +} + +static inline +int __vx_cres_adjust_min(struct _vx_limit *limit, int res, rlim_t value) +{ + int cond = (value < __rlim_rmin(limit, res)); + + if (cond) + __rlim_rmin(limit, res) = value; + return cond; +} + +static inline +void __vx_cres_fixup(struct _vx_limit *limit, int res, rlim_t value) +{ + if (!__vx_cres_adjust_max(limit, res, value)) + __vx_cres_adjust_min(limit, res, value); +} + + +/* return values: + +1 ... no limit hit + -1 ... over soft limit + 0 ... over hard limit */ + +static inline int __vx_cres_avail(struct vx_info *vxi, + int res, int num, char *_file, int _line) +{ + struct _vx_limit *limit; + rlim_t value; + + if (VXD_RLIMIT_COND(res)) + vxlprintk(1, "vx_cres_avail[%5d,%s,%2d]: %5ld/%5ld > %5ld + %5d", + (vxi ? vxi->vx_id : -1), vlimit_name[res], res, + (vxi ? (long)__rlim_soft(&vxi->limit, res) : -1), + (vxi ? (long)__rlim_hard(&vxi->limit, res) : -1), + (vxi ? 
(long)__rlim_get(&vxi->limit, res) : 0), + num, _file, _line); + if (!vxi) + return 1; + + limit = &vxi->limit; + value = __rlim_get(limit, res); + + if (!__vx_cres_adjust_max(limit, res, value)) + __vx_cres_adjust_min(limit, res, value); + + if (num == 0) + return 1; + + if (__rlim_soft(limit, res) == RLIM_INFINITY) + return -1; + if (value + num <= __rlim_soft(limit, res)) + return -1; + + if (__rlim_hard(limit, res) == RLIM_INFINITY) + return 1; + if (value + num <= __rlim_hard(limit, res)) + return 1; + + __rlim_hit(limit, res); + return 0; +} + + +static const int VLA_RSS[] = { RLIMIT_RSS, VLIMIT_ANON, VLIMIT_MAPPED, 0 }; + +static inline +rlim_t __vx_cres_array_sum(struct _vx_limit *limit, const int *array) +{ + rlim_t value, sum = 0; + int res; + + while ((res = *array++)) { + value = __rlim_get(limit, res); + __vx_cres_fixup(limit, res, value); + sum += value; + } + return sum; +} + +static inline +rlim_t __vx_cres_array_fixup(struct _vx_limit *limit, const int *array) +{ + rlim_t value = __vx_cres_array_sum(limit, array + 1); + int res = *array; + + if (value == __rlim_get(limit, res)) + return value; + + __rlim_set(limit, res, value); + /* now adjust min/max */ + if (!__vx_cres_adjust_max(limit, res, value)) + __vx_cres_adjust_min(limit, res, value); + + return value; +} + +static inline int __vx_cres_array_avail(struct vx_info *vxi, + const int *array, int num, char *_file, int _line) +{ + struct _vx_limit *limit; + rlim_t value = 0; + int res; + + if (num == 0) + return 1; + if (!vxi) + return 1; + + limit = &vxi->limit; + res = *array; + value = __vx_cres_array_sum(limit, array + 1); + + __rlim_set(limit, res, value); + __vx_cres_fixup(limit, res, value); + + return __vx_cres_avail(vxi, res, num, _file, _line); +} + + +static inline void vx_limit_fixup(struct _vx_limit *limit, int id) +{ + rlim_t value; + int res; + + /* complex resources first */ + if ((id < 0) || (id == RLIMIT_RSS)) + __vx_cres_array_fixup(limit, VLA_RSS); + + for (res = 0; res < NUM_LIMITS; res++) { + if ((id > 0) && (res != id)) + continue; + + value = __rlim_get(limit, res); + __vx_cres_fixup(limit, res, value); + + /* not supposed to happen, maybe warn? 
*/ + if (__rlim_rmax(limit, res) > __rlim_hard(limit, res)) + __rlim_rmax(limit, res) = __rlim_hard(limit, res); + } +} + + +#endif /* __KERNEL__ */ +#endif /* _VX_LIMIT_INT_H */ diff -Nur kernel-2.6.32.54/linux-2.6.32/include/linux/vserver/monitor.h kernel-2.6.32.54.vs/linux-2.6.32/include/linux/vserver/monitor.h --- kernel-2.6.32.54/linux-2.6.32/include/linux/vserver/monitor.h 1970-01-01 01:00:00.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/include/linux/vserver/monitor.h 2012-01-16 14:51:21.985408463 +0100 @@ -0,0 +1,96 @@ +#ifndef _VX_MONITOR_H +#define _VX_MONITOR_H + +#include + +enum { + VXM_UNUSED = 0, + + VXM_SYNC = 0x10, + + VXM_UPDATE = 0x20, + VXM_UPDATE_1, + VXM_UPDATE_2, + + VXM_RQINFO_1 = 0x24, + VXM_RQINFO_2, + + VXM_ACTIVATE = 0x40, + VXM_DEACTIVATE, + VXM_IDLE, + + VXM_HOLD = 0x44, + VXM_UNHOLD, + + VXM_MIGRATE = 0x48, + VXM_RESCHED, + + /* all other bits are flags */ + VXM_SCHED = 0x80, +}; + +struct _vxm_update_1 { + uint32_t tokens_max; + uint32_t fill_rate; + uint32_t interval; +}; + +struct _vxm_update_2 { + uint32_t tokens_min; + uint32_t fill_rate; + uint32_t interval; +}; + +struct _vxm_rqinfo_1 { + uint16_t running; + uint16_t onhold; + uint16_t iowait; + uint16_t uintr; + uint32_t idle_tokens; +}; + +struct _vxm_rqinfo_2 { + uint32_t norm_time; + uint32_t idle_time; + uint32_t idle_skip; +}; + +struct _vxm_sched { + uint32_t tokens; + uint32_t norm_time; + uint32_t idle_time; +}; + +struct _vxm_task { + uint16_t pid; + uint16_t state; +}; + +struct _vxm_event { + uint32_t jif; + union { + uint32_t seq; + uint32_t sec; + }; + union { + uint32_t tokens; + uint32_t nsec; + struct _vxm_task tsk; + }; +}; + +struct _vx_mon_entry { + uint16_t type; + uint16_t xid; + union { + struct _vxm_event ev; + struct _vxm_sched sd; + struct _vxm_update_1 u1; + struct _vxm_update_2 u2; + struct _vxm_rqinfo_1 q1; + struct _vxm_rqinfo_2 q2; + }; +}; + + +#endif /* _VX_MONITOR_H */ diff -Nur kernel-2.6.32.54/linux-2.6.32/include/linux/vserver/network_cmd.h kernel-2.6.32.54.vs/linux-2.6.32/include/linux/vserver/network_cmd.h --- kernel-2.6.32.54/linux-2.6.32/include/linux/vserver/network_cmd.h 1970-01-01 01:00:00.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/include/linux/vserver/network_cmd.h 2012-01-16 14:51:21.985408463 +0100 @@ -0,0 +1,150 @@ +#ifndef _VX_NETWORK_CMD_H +#define _VX_NETWORK_CMD_H + + +/* vinfo commands */ + +#define VCMD_task_nid VC_CMD(VINFO, 2, 0) + +#ifdef __KERNEL__ +extern int vc_task_nid(uint32_t); + +#endif /* __KERNEL__ */ + +#define VCMD_nx_info VC_CMD(VINFO, 6, 0) + +struct vcmd_nx_info_v0 { + uint32_t nid; + /* more to come */ +}; + +#ifdef __KERNEL__ +extern int vc_nx_info(struct nx_info *, void __user *); + +#endif /* __KERNEL__ */ + +#include +#include + +#define VCMD_net_create_v0 VC_CMD(VNET, 1, 0) +#define VCMD_net_create VC_CMD(VNET, 1, 1) + +struct vcmd_net_create { + uint64_t flagword; +}; + +#define VCMD_net_migrate VC_CMD(NETMIG, 1, 0) + +#define VCMD_net_add VC_CMD(NETALT, 1, 0) +#define VCMD_net_remove VC_CMD(NETALT, 2, 0) + +struct vcmd_net_addr_v0 { + uint16_t type; + uint16_t count; + struct in_addr ip[4]; + struct in_addr mask[4]; +}; + +#define VCMD_net_add_ipv4 VC_CMD(NETALT, 1, 1) +#define VCMD_net_remove_ipv4 VC_CMD(NETALT, 2, 1) + +struct vcmd_net_addr_ipv4_v1 { + uint16_t type; + uint16_t flags; + struct in_addr ip; + struct in_addr mask; +}; + +#define VCMD_net_add_ipv6 VC_CMD(NETALT, 3, 1) +#define VCMD_net_remove_ipv6 VC_CMD(NETALT, 4, 1) + +struct vcmd_net_addr_ipv6_v1 { + uint16_t type; + uint16_t flags; + 
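/* editor's note (not in the original patch): inferred to be the CIDR prefix length, in bits, for the v6 address/mask pair that follows */ 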
uint32_t prefix; + struct in6_addr ip; + struct in6_addr mask; +}; + +#define VCMD_add_match_ipv4 VC_CMD(NETALT, 5, 0) +#define VCMD_get_match_ipv4 VC_CMD(NETALT, 6, 0) + +struct vcmd_match_ipv4_v0 { + uint16_t type; + uint16_t flags; + uint16_t parent; + uint16_t prefix; + struct in_addr ip; + struct in_addr ip2; + struct in_addr mask; +}; + +#define VCMD_add_match_ipv6 VC_CMD(NETALT, 7, 0) +#define VCMD_get_match_ipv6 VC_CMD(NETALT, 8, 0) + +struct vcmd_match_ipv6_v0 { + uint16_t type; + uint16_t flags; + uint16_t parent; + uint16_t prefix; + struct in6_addr ip; + struct in6_addr ip2; + struct in6_addr mask; +}; + + +#ifdef __KERNEL__ +extern int vc_net_create(uint32_t, void __user *); +extern int vc_net_migrate(struct nx_info *, void __user *); + +extern int vc_net_add(struct nx_info *, void __user *); +extern int vc_net_remove(struct nx_info *, void __user *); + +extern int vc_net_add_ipv4(struct nx_info *, void __user *); +extern int vc_net_remove_ipv4(struct nx_info *, void __user *); + +extern int vc_net_add_ipv6(struct nx_info *, void __user *); +extern int vc_net_remove_ipv6(struct nx_info *, void __user *); + +extern int vc_add_match_ipv4(struct nx_info *, void __user *); +extern int vc_get_match_ipv4(struct nx_info *, void __user *); + +extern int vc_add_match_ipv6(struct nx_info *, void __user *); +extern int vc_get_match_ipv6(struct nx_info *, void __user *); + +#endif /* __KERNEL__ */ + + +/* flag commands */ + +#define VCMD_get_nflags VC_CMD(FLAGS, 5, 0) +#define VCMD_set_nflags VC_CMD(FLAGS, 6, 0) + +struct vcmd_net_flags_v0 { + uint64_t flagword; + uint64_t mask; +}; + +#ifdef __KERNEL__ +extern int vc_get_nflags(struct nx_info *, void __user *); +extern int vc_set_nflags(struct nx_info *, void __user *); + +#endif /* __KERNEL__ */ + + +/* network caps commands */ + +#define VCMD_get_ncaps VC_CMD(FLAGS, 7, 0) +#define VCMD_set_ncaps VC_CMD(FLAGS, 8, 0) + +struct vcmd_net_caps_v0 { + uint64_t ncaps; + uint64_t cmask; +}; + +#ifdef __KERNEL__ +extern int vc_get_ncaps(struct nx_info *, void __user *); +extern int vc_set_ncaps(struct nx_info *, void __user *); + +#endif /* __KERNEL__ */ +#endif /* _VX_CONTEXT_CMD_H */ diff -Nur kernel-2.6.32.54/linux-2.6.32/include/linux/vserver/network.h kernel-2.6.32.54.vs/linux-2.6.32/include/linux/vserver/network.h --- kernel-2.6.32.54/linux-2.6.32/include/linux/vserver/network.h 1970-01-01 01:00:00.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/include/linux/vserver/network.h 2012-01-16 14:51:21.985408463 +0100 @@ -0,0 +1,146 @@ +#ifndef _VX_NETWORK_H +#define _VX_NETWORK_H + +#include + + +#define MAX_N_CONTEXT 65535 /* Arbitrary limit */ + + +/* network flags */ + +#define NXF_INFO_PRIVATE 0x00000008 + +#define NXF_SINGLE_IP 0x00000100 +#define NXF_LBACK_REMAP 0x00000200 +#define NXF_LBACK_ALLOW 0x00000400 + +#define NXF_HIDE_NETIF 0x02000000 +#define NXF_HIDE_LBACK 0x04000000 + +#define NXF_STATE_SETUP (1ULL << 32) +#define NXF_STATE_ADMIN (1ULL << 34) + +#define NXF_SC_HELPER (1ULL << 36) +#define NXF_PERSISTENT (1ULL << 38) + +#define NXF_ONE_TIME (0x0005ULL << 32) + + +#define NXF_INIT_SET (__nxf_init_set()) + +static inline uint64_t __nxf_init_set(void) { + return NXF_STATE_ADMIN +#ifdef CONFIG_VSERVER_AUTO_LBACK + | NXF_LBACK_REMAP + | NXF_HIDE_LBACK +#endif +#ifdef CONFIG_VSERVER_AUTO_SINGLE + | NXF_SINGLE_IP +#endif + | NXF_HIDE_NETIF; +} + + +/* network caps */ + +#define NXC_TUN_CREATE 0x00000001 + +#define NXC_RAW_ICMP 0x00000100 + + +/* address types */ + +#define NXA_TYPE_IPV4 0x0001 +#define NXA_TYPE_IPV6 0x0002 + 
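/*
 * Editor's note, not part of the original patch: a minimal userspace
 * sketch of how the vcmd structures above are driven.  The wrapper
 * vserver(cmd, id, data) is an assumption (the usual three-argument
 * entry point exposed by util-vserver; name and syscall number vary
 * by platform); the command and structure layout come straight from
 * the definitions above, the NXA_TYPE_* values from the defines here.
 *
 *	struct vcmd_net_addr_ipv4_v1 va = {
 *		.type = NXA_TYPE_ADDR | NXA_TYPE_MASK,
 *	};
 *	inet_pton(AF_INET, "10.0.0.2", &va.ip);        // hypothetical address
 *	inet_pton(AF_INET, "255.255.255.0", &va.mask);
 *	if (vserver(VCMD_net_add_ipv4, 42, &va) < 0)   // nid 42 is made up
 *		perror("VCMD_net_add_ipv4");
 */ 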
+#define NXA_TYPE_NONE 0x0000 +#define NXA_TYPE_ANY 0x00FF + +#define NXA_TYPE_ADDR 0x0010 +#define NXA_TYPE_MASK 0x0020 +#define NXA_TYPE_RANGE 0x0040 + +#define NXA_MASK_ALL (NXA_TYPE_ADDR | NXA_TYPE_MASK | NXA_TYPE_RANGE) + +#define NXA_MOD_BCAST 0x0100 +#define NXA_MOD_LBACK 0x0200 + +#define NXA_LOOPBACK 0x1000 + +#define NXA_MASK_BIND (NXA_MASK_ALL | NXA_MOD_BCAST | NXA_MOD_LBACK) +#define NXA_MASK_SHOW (NXA_MASK_ALL | NXA_LOOPBACK) + +#ifdef __KERNEL__ + +#include +#include +#include +#include +#include +#include + +struct nx_addr_v4 { + struct nx_addr_v4 *next; + struct in_addr ip[2]; + struct in_addr mask; + uint16_t type; + uint16_t flags; +}; + +struct nx_addr_v6 { + struct nx_addr_v6 *next; + struct in6_addr ip; + struct in6_addr mask; + uint32_t prefix; + uint16_t type; + uint16_t flags; +}; + +struct nx_info { + struct hlist_node nx_hlist; /* linked list of nxinfos */ + nid_t nx_id; /* vnet id */ + atomic_t nx_usecnt; /* usage count */ + atomic_t nx_tasks; /* tasks count */ + int nx_state; /* context state */ + + uint64_t nx_flags; /* network flag word */ + uint64_t nx_ncaps; /* network capabilities */ + + struct in_addr v4_lback; /* Loopback address */ + struct in_addr v4_bcast; /* Broadcast address */ + struct nx_addr_v4 v4; /* First/Single ipv4 address */ +#ifdef CONFIG_IPV6 + struct nx_addr_v6 v6; /* First/Single ipv6 address */ +#endif + char nx_name[65]; /* network context name */ +}; + + +/* status flags */ + +#define NXS_HASHED 0x0001 +#define NXS_SHUTDOWN 0x0100 +#define NXS_RELEASED 0x8000 + +extern struct nx_info *lookup_nx_info(int); + +extern int get_nid_list(int, unsigned int *, int); +extern int nid_is_hashed(nid_t); + +extern int nx_migrate_task(struct task_struct *, struct nx_info *); + +extern long vs_net_change(struct nx_info *, unsigned int); + +struct sock; + + +#define NX_IPV4(n) ((n)->v4.type != NXA_TYPE_NONE) +#ifdef CONFIG_IPV6 +#define NX_IPV6(n) ((n)->v6.type != NXA_TYPE_NONE) +#else +#define NX_IPV6(n) (0) +#endif + +#endif /* __KERNEL__ */ +#endif /* _VX_NETWORK_H */ diff -Nur kernel-2.6.32.54/linux-2.6.32/include/linux/vserver/percpu.h kernel-2.6.32.54.vs/linux-2.6.32/include/linux/vserver/percpu.h --- kernel-2.6.32.54/linux-2.6.32/include/linux/vserver/percpu.h 1970-01-01 01:00:00.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/include/linux/vserver/percpu.h 2012-01-16 14:51:21.985408463 +0100 @@ -0,0 +1,14 @@ +#ifndef _VX_PERCPU_H +#define _VX_PERCPU_H + +#include "cvirt_def.h" +#include "sched_def.h" + +struct _vx_percpu { + struct _vx_cvirt_pc cvirt; + struct _vx_sched_pc sched; +}; + +#define PERCPU_PERCTX (sizeof(struct _vx_percpu)) + +#endif /* _VX_PERCPU_H */ diff -Nur kernel-2.6.32.54/linux-2.6.32/include/linux/vserver/pid.h kernel-2.6.32.54.vs/linux-2.6.32/include/linux/vserver/pid.h --- kernel-2.6.32.54/linux-2.6.32/include/linux/vserver/pid.h 1970-01-01 01:00:00.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/include/linux/vserver/pid.h 2012-01-16 14:51:21.985408463 +0100 @@ -0,0 +1,51 @@ +#ifndef _VSERVER_PID_H +#define _VSERVER_PID_H + +/* pid faking stuff */ + +#define vx_info_map_pid(v, p) \ + __vx_info_map_pid((v), (p), __func__, __FILE__, __LINE__) +#define vx_info_map_tgid(v,p) vx_info_map_pid(v,p) +#define vx_map_pid(p) vx_info_map_pid(current_vx_info(), p) +#define vx_map_tgid(p) vx_map_pid(p) + +static inline int __vx_info_map_pid(struct vx_info *vxi, int pid, + const char *func, const char *file, int line) +{ + if (vx_info_flags(vxi, VXF_INFO_INIT, 0)) { + vxfprintk(VXD_CBIT(cvirt, 2), + "vx_map_tgid: %p/%llx: %d 
-> %d", + vxi, (long long)vxi->vx_flags, pid, + (pid && pid == vxi->vx_initpid) ? 1 : pid, + func, file, line); + if (pid == 0) + return 0; + if (pid == vxi->vx_initpid) + return 1; + } + return pid; +} + +#define vx_info_rmap_pid(v, p) \ + __vx_info_rmap_pid((v), (p), __func__, __FILE__, __LINE__) +#define vx_rmap_pid(p) vx_info_rmap_pid(current_vx_info(), p) +#define vx_rmap_tgid(p) vx_rmap_pid(p) + +static inline int __vx_info_rmap_pid(struct vx_info *vxi, int pid, + const char *func, const char *file, int line) +{ + if (vx_info_flags(vxi, VXF_INFO_INIT, 0)) { + vxfprintk(VXD_CBIT(cvirt, 2), + "vx_rmap_tgid: %p/%llx: %d -> %d", + vxi, (long long)vxi->vx_flags, pid, + (pid == 1) ? vxi->vx_initpid : pid, + func, file, line); + if ((pid == 1) && vxi->vx_initpid) + return vxi->vx_initpid; + if (pid == vxi->vx_initpid) + return ~0U; + } + return pid; +} + +#endif diff -Nur kernel-2.6.32.54/linux-2.6.32/include/linux/vserver/sched_cmd.h kernel-2.6.32.54.vs/linux-2.6.32/include/linux/vserver/sched_cmd.h --- kernel-2.6.32.54/linux-2.6.32/include/linux/vserver/sched_cmd.h 1970-01-01 01:00:00.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/include/linux/vserver/sched_cmd.h 2012-01-16 14:51:21.985408463 +0100 @@ -0,0 +1,108 @@ +#ifndef _VX_SCHED_CMD_H +#define _VX_SCHED_CMD_H + + +/* sched vserver commands */ + +#define VCMD_set_sched_v2 VC_CMD(SCHED, 1, 2) +#define VCMD_set_sched_v3 VC_CMD(SCHED, 1, 3) +#define VCMD_set_sched_v4 VC_CMD(SCHED, 1, 4) + +struct vcmd_set_sched_v2 { + int32_t fill_rate; + int32_t interval; + int32_t tokens; + int32_t tokens_min; + int32_t tokens_max; + uint64_t cpu_mask; +}; + +struct vcmd_set_sched_v3 { + uint32_t set_mask; + int32_t fill_rate; + int32_t interval; + int32_t tokens; + int32_t tokens_min; + int32_t tokens_max; + int32_t priority_bias; +}; + +struct vcmd_set_sched_v4 { + uint32_t set_mask; + int32_t fill_rate; + int32_t interval; + int32_t tokens; + int32_t tokens_min; + int32_t tokens_max; + int32_t prio_bias; + int32_t cpu_id; + int32_t bucket_id; +}; + +#define VCMD_set_sched VC_CMD(SCHED, 1, 5) +#define VCMD_get_sched VC_CMD(SCHED, 2, 5) + +struct vcmd_sched_v5 { + uint32_t mask; + int32_t cpu_id; + int32_t bucket_id; + int32_t fill_rate[2]; + int32_t interval[2]; + int32_t tokens; + int32_t tokens_min; + int32_t tokens_max; + int32_t prio_bias; +}; + +#define VXSM_FILL_RATE 0x0001 +#define VXSM_INTERVAL 0x0002 +#define VXSM_FILL_RATE2 0x0004 +#define VXSM_INTERVAL2 0x0008 +#define VXSM_TOKENS 0x0010 +#define VXSM_TOKENS_MIN 0x0020 +#define VXSM_TOKENS_MAX 0x0040 +#define VXSM_PRIO_BIAS 0x0100 + +#define VXSM_IDLE_TIME 0x0200 +#define VXSM_FORCE 0x0400 + +#define VXSM_V3_MASK 0x0173 +#define VXSM_SET_MASK 0x01FF + +#define VXSM_CPU_ID 0x1000 +#define VXSM_BUCKET_ID 0x2000 + +#define VXSM_MSEC 0x4000 + +#define SCHED_KEEP (-2) /* only for v2 */ + +#ifdef __KERNEL__ + +#include + +extern int vc_set_sched_v2(struct vx_info *, void __user *); +extern int vc_set_sched_v3(struct vx_info *, void __user *); +extern int vc_set_sched_v4(struct vx_info *, void __user *); +extern int vc_set_sched(struct vx_info *, void __user *); +extern int vc_get_sched(struct vx_info *, void __user *); + +#endif /* __KERNEL__ */ + +#define VCMD_sched_info VC_CMD(SCHED, 3, 0) + +struct vcmd_sched_info { + int32_t cpu_id; + int32_t bucket_id; + uint64_t user_msec; + uint64_t sys_msec; + uint64_t hold_msec; + uint32_t token_usec; + int32_t vavavoom; +}; + +#ifdef __KERNEL__ + +extern int vc_sched_info(struct vx_info *, void __user *); + +#endif /* __KERNEL__ */ +#endif /* 
_VX_SCHED_CMD_H */ diff -Nur kernel-2.6.32.54/linux-2.6.32/include/linux/vserver/sched_def.h kernel-2.6.32.54.vs/linux-2.6.32/include/linux/vserver/sched_def.h --- kernel-2.6.32.54/linux-2.6.32/include/linux/vserver/sched_def.h 1970-01-01 01:00:00.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/include/linux/vserver/sched_def.h 2012-01-16 14:51:21.985408463 +0100 @@ -0,0 +1,68 @@ +#ifndef _VX_SCHED_DEF_H +#define _VX_SCHED_DEF_H + +#include +#include +#include +#include +#include + + +/* context sub struct */ + +struct _vx_sched { + spinlock_t tokens_lock; /* lock for token bucket */ + + int tokens; /* number of CPU tokens */ + int fill_rate[2]; /* Fill rate: add X tokens... */ + int interval[2]; /* Divisor: per Y jiffies */ + int tokens_min; /* Limit: minimum for unhold */ + int tokens_max; /* Limit: no more than N tokens */ + + int prio_bias; /* bias offset for priority */ + + unsigned update_mask; /* which features should be updated */ + cpumask_t update; /* CPUs which should update */ +}; + +struct _vx_sched_pc { + int tokens; /* number of CPU tokens */ + int flags; /* bucket flags */ + + int fill_rate[2]; /* Fill rate: add X tokens... */ + int interval[2]; /* Divisor: per Y jiffies */ + int tokens_min; /* Limit: minimum for unhold */ + int tokens_max; /* Limit: no more than N tokens */ + + int prio_bias; /* bias offset for priority */ + int vavavoom; /* last calculated vavavoom */ + + unsigned long norm_time; /* last time accounted */ + unsigned long idle_time; /* non linear time for fair sched */ + unsigned long token_time; /* token time for accounting */ + unsigned long onhold; /* jiffies when put on hold */ + + uint64_t user_ticks; /* token tick events */ + uint64_t sys_ticks; /* token tick events */ + uint64_t hold_ticks; /* token ticks paused */ +}; + + +#define VXSF_ONHOLD 0x0001 +#define VXSF_IDLE_TIME 0x0100 + +#ifdef CONFIG_VSERVER_DEBUG + +static inline void __dump_vx_sched(struct _vx_sched *sched) +{ + printk("\t_vx_sched:\n"); + printk("\t tokens: %4d/%4d, %4d/%4d, %4d, %4d\n", + sched->fill_rate[0], sched->interval[0], + sched->fill_rate[1], sched->interval[1], + sched->tokens_min, sched->tokens_max); + printk("\t priority = %4d\n", sched->prio_bias); +} + +#endif + +#endif /* _VX_SCHED_DEF_H */ diff -Nur kernel-2.6.32.54/linux-2.6.32/include/linux/vserver/sched.h kernel-2.6.32.54.vs/linux-2.6.32/include/linux/vserver/sched.h --- kernel-2.6.32.54/linux-2.6.32/include/linux/vserver/sched.h 1970-01-01 01:00:00.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/include/linux/vserver/sched.h 2012-01-16 14:51:21.985408463 +0100 @@ -0,0 +1,26 @@ +#ifndef _VX_SCHED_H +#define _VX_SCHED_H + + +#ifdef __KERNEL__ + +struct timespec; + +void vx_vsi_uptime(struct timespec *, struct timespec *); + + +struct vx_info; + +void vx_update_load(struct vx_info *); + + +int vx_tokens_recalc(struct _vx_sched_pc *, + unsigned long *, unsigned long *, int [2]); + +void vx_update_sched_param(struct _vx_sched *sched, + struct _vx_sched_pc *sched_pc); + +#endif /* __KERNEL__ */ +#else /* _VX_SCHED_H */ +#warning duplicate inclusion +#endif /* _VX_SCHED_H */ diff -Nur kernel-2.6.32.54/linux-2.6.32/include/linux/vserver/signal_cmd.h kernel-2.6.32.54.vs/linux-2.6.32/include/linux/vserver/signal_cmd.h --- kernel-2.6.32.54/linux-2.6.32/include/linux/vserver/signal_cmd.h 1970-01-01 01:00:00.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/include/linux/vserver/signal_cmd.h 2012-01-16 14:51:21.985408463 +0100 @@ -0,0 +1,43 @@ +#ifndef _VX_SIGNAL_CMD_H +#define _VX_SIGNAL_CMD_H + + +/* 
signalling vserver commands */ + +#define VCMD_ctx_kill VC_CMD(PROCTRL, 1, 0) +#define VCMD_wait_exit VC_CMD(EVENT, 99, 0) + +struct vcmd_ctx_kill_v0 { + int32_t pid; + int32_t sig; +}; + +struct vcmd_wait_exit_v0 { + int32_t reboot_cmd; + int32_t exit_code; +}; + +#ifdef __KERNEL__ + +extern int vc_ctx_kill(struct vx_info *, void __user *); +extern int vc_wait_exit(struct vx_info *, void __user *); + +#endif /* __KERNEL__ */ + +/* process alteration commands */ + +#define VCMD_get_pflags VC_CMD(PROCALT, 5, 0) +#define VCMD_set_pflags VC_CMD(PROCALT, 6, 0) + +struct vcmd_pflags_v0 { + uint32_t flagword; + uint32_t mask; +}; + +#ifdef __KERNEL__ + +extern int vc_get_pflags(uint32_t pid, void __user *); +extern int vc_set_pflags(uint32_t pid, void __user *); + +#endif /* __KERNEL__ */ +#endif /* _VX_SIGNAL_CMD_H */ diff -Nur kernel-2.6.32.54/linux-2.6.32/include/linux/vserver/signal.h kernel-2.6.32.54.vs/linux-2.6.32/include/linux/vserver/signal.h --- kernel-2.6.32.54/linux-2.6.32/include/linux/vserver/signal.h 1970-01-01 01:00:00.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/include/linux/vserver/signal.h 2012-01-16 14:51:21.985408463 +0100 @@ -0,0 +1,14 @@ +#ifndef _VX_SIGNAL_H +#define _VX_SIGNAL_H + + +#ifdef __KERNEL__ + +struct vx_info; + +int vx_info_kill(struct vx_info *, int, int); + +#endif /* __KERNEL__ */ +#else /* _VX_SIGNAL_H */ +#warning duplicate inclusion +#endif /* _VX_SIGNAL_H */ diff -Nur kernel-2.6.32.54/linux-2.6.32/include/linux/vserver/space_cmd.h kernel-2.6.32.54.vs/linux-2.6.32/include/linux/vserver/space_cmd.h --- kernel-2.6.32.54/linux-2.6.32/include/linux/vserver/space_cmd.h 1970-01-01 01:00:00.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/include/linux/vserver/space_cmd.h 2012-01-16 14:51:21.985408463 +0100 @@ -0,0 +1,38 @@ +#ifndef _VX_SPACE_CMD_H +#define _VX_SPACE_CMD_H + + +#define VCMD_enter_space_v0 VC_CMD(PROCALT, 1, 0) +#define VCMD_enter_space_v1 VC_CMD(PROCALT, 1, 1) +#define VCMD_enter_space VC_CMD(PROCALT, 1, 2) + +#define VCMD_set_space_v0 VC_CMD(PROCALT, 3, 0) +#define VCMD_set_space_v1 VC_CMD(PROCALT, 3, 1) +#define VCMD_set_space VC_CMD(PROCALT, 3, 2) + +#define VCMD_get_space_mask_v0 VC_CMD(PROCALT, 4, 0) + +#define VCMD_get_space_mask VC_CMD(VSPACE, 0, 1) +#define VCMD_get_space_default VC_CMD(VSPACE, 1, 0) + + +struct vcmd_space_mask_v1 { + uint64_t mask; +}; + +struct vcmd_space_mask_v2 { + uint64_t mask; + uint32_t index; +}; + + +#ifdef __KERNEL__ + +extern int vc_enter_space_v1(struct vx_info *, void __user *); +extern int vc_set_space_v1(struct vx_info *, void __user *); +extern int vc_enter_space(struct vx_info *, void __user *); +extern int vc_set_space(struct vx_info *, void __user *); +extern int vc_get_space_mask(void __user *, int); + +#endif /* __KERNEL__ */ +#endif /* _VX_SPACE_CMD_H */ diff -Nur kernel-2.6.32.54/linux-2.6.32/include/linux/vserver/space.h kernel-2.6.32.54.vs/linux-2.6.32/include/linux/vserver/space.h --- kernel-2.6.32.54/linux-2.6.32/include/linux/vserver/space.h 1970-01-01 01:00:00.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/include/linux/vserver/space.h 2012-01-16 14:51:21.985408463 +0100 @@ -0,0 +1,12 @@ +#ifndef _VX_SPACE_H +#define _VX_SPACE_H + +#include + +struct vx_info; + +int vx_set_space(struct vx_info *vxi, unsigned long mask, unsigned index); + +#else /* _VX_SPACE_H */ +#warning duplicate inclusion +#endif /* _VX_SPACE_H */ diff -Nur kernel-2.6.32.54/linux-2.6.32/include/linux/vserver/switch.h kernel-2.6.32.54.vs/linux-2.6.32/include/linux/vserver/switch.h --- 
kernel-2.6.32.54/linux-2.6.32/include/linux/vserver/switch.h 1970-01-01 01:00:00.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/include/linux/vserver/switch.h 2012-01-16 14:51:21.985408463 +0100 @@ -0,0 +1,98 @@ +#ifndef _VX_SWITCH_H +#define _VX_SWITCH_H + +#include + + +#define VC_CATEGORY(c) (((c) >> 24) & 0x3F) +#define VC_COMMAND(c) (((c) >> 16) & 0xFF) +#define VC_VERSION(c) ((c) & 0xFFF) + +#define VC_CMD(c, i, v) ((((VC_CAT_ ## c) & 0x3F) << 24) \ + | (((i) & 0xFF) << 16) | ((v) & 0xFFF)) + +/* + + Syscall Matrix V2.8 + + |VERSION|CREATE |MODIFY |MIGRATE|CONTROL|EXPERIM| |SPECIAL|SPECIAL| + |STATS |DESTROY|ALTER |CHANGE |LIMIT |TEST | | | | + |INFO |SETUP | |MOVE | | | | | | + -------+-------+-------+-------+-------+-------+-------+ +-------+-------+ + SYSTEM |VERSION|VSETUP |VHOST | | | | |DEVICE | | + HOST | 00| 01| 02| 03| 04| 05| | 06| 07| + -------+-------+-------+-------+-------+-------+-------+ +-------+-------+ + CPU | |VPROC |PROCALT|PROCMIG|PROCTRL| | |SCHED. | | + PROCESS| 08| 09| 10| 11| 12| 13| | 14| 15| + -------+-------+-------+-------+-------+-------+-------+ +-------+-------+ + MEMORY | | | | |MEMCTRL| | |SWAP | | + | 16| 17| 18| 19| 20| 21| | 22| 23| + -------+-------+-------+-------+-------+-------+-------+ +-------+-------+ + NETWORK| |VNET |NETALT |NETMIG |NETCTL | | |SERIAL | | + | 24| 25| 26| 27| 28| 29| | 30| 31| + -------+-------+-------+-------+-------+-------+-------+ +-------+-------+ + DISK | | | |TAGMIG |DLIMIT | | |INODE | | + VFS | 32| 33| 34| 35| 36| 37| | 38| 39| + -------+-------+-------+-------+-------+-------+-------+ +-------+-------+ + OTHER |VSTAT | | | | | | |VINFO | | + | 40| 41| 42| 43| 44| 45| | 46| 47| + =======+=======+=======+=======+=======+=======+=======+ +=======+=======+ + SPECIAL|EVENT | | | |FLAGS | | |VSPACE | | + | 48| 49| 50| 51| 52| 53| | 54| 55| + -------+-------+-------+-------+-------+-------+-------+ +-------+-------+ + SPECIAL|DEBUG | | | |RLIMIT |SYSCALL| | |COMPAT | + | 56| 57| 58| 59| 60|TEST 61| | 62| 63| + -------+-------+-------+-------+-------+-------+-------+ +-------+-------+ + +*/ + +#define VC_CAT_VERSION 0 + +#define VC_CAT_VSETUP 1 +#define VC_CAT_VHOST 2 + +#define VC_CAT_DEVICE 6 + +#define VC_CAT_VPROC 9 +#define VC_CAT_PROCALT 10 +#define VC_CAT_PROCMIG 11 +#define VC_CAT_PROCTRL 12 + +#define VC_CAT_SCHED 14 +#define VC_CAT_MEMCTRL 20 + +#define VC_CAT_VNET 25 +#define VC_CAT_NETALT 26 +#define VC_CAT_NETMIG 27 +#define VC_CAT_NETCTRL 28 + +#define VC_CAT_TAGMIG 35 +#define VC_CAT_DLIMIT 36 +#define VC_CAT_INODE 38 + +#define VC_CAT_VSTAT 40 +#define VC_CAT_VINFO 46 +#define VC_CAT_EVENT 48 + +#define VC_CAT_FLAGS 52 +#define VC_CAT_VSPACE 54 +#define VC_CAT_DEBUG 56 +#define VC_CAT_RLIMIT 60 + +#define VC_CAT_SYSTEST 61 +#define VC_CAT_COMPAT 63 + +/* query version */ + +#define VCMD_get_version VC_CMD(VERSION, 0, 0) +#define VCMD_get_vci VC_CMD(VERSION, 1, 0) + + +#ifdef __KERNEL__ + +#include + +#endif /* __KERNEL__ */ + +#endif /* _VX_SWITCH_H */ + diff -Nur kernel-2.6.32.54/linux-2.6.32/include/linux/vserver/tag_cmd.h kernel-2.6.32.54.vs/linux-2.6.32/include/linux/vserver/tag_cmd.h --- kernel-2.6.32.54/linux-2.6.32/include/linux/vserver/tag_cmd.h 1970-01-01 01:00:00.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/include/linux/vserver/tag_cmd.h 2012-01-16 14:51:21.989408449 +0100 @@ -0,0 +1,22 @@ +#ifndef _VX_TAG_CMD_H +#define _VX_TAG_CMD_H + + +/* vinfo commands */ + +#define VCMD_task_tag VC_CMD(VINFO, 3, 0) + +#ifdef __KERNEL__ +extern int vc_task_tag(uint32_t); + +#endif /* 
__KERNEL__ */ + +/* context commands */ + +#define VCMD_tag_migrate VC_CMD(TAGMIG, 1, 0) + +#ifdef __KERNEL__ +extern int vc_tag_migrate(uint32_t); + +#endif /* __KERNEL__ */ +#endif /* _VX_TAG_CMD_H */ diff -Nur kernel-2.6.32.54/linux-2.6.32/include/linux/vserver/tag.h kernel-2.6.32.54.vs/linux-2.6.32/include/linux/vserver/tag.h --- kernel-2.6.32.54/linux-2.6.32/include/linux/vserver/tag.h 1970-01-01 01:00:00.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/include/linux/vserver/tag.h 2012-01-16 14:51:21.989408449 +0100 @@ -0,0 +1,143 @@ +#ifndef _DX_TAG_H +#define _DX_TAG_H + +#include + + +#define DX_TAG(in) (IS_TAGGED(in)) + + +#ifdef CONFIG_TAG_NFSD +#define DX_TAG_NFSD 1 +#else +#define DX_TAG_NFSD 0 +#endif + + +#ifdef CONFIG_TAGGING_NONE + +#define MAX_UID 0xFFFFFFFF +#define MAX_GID 0xFFFFFFFF + +#define INOTAG_TAG(cond, uid, gid, tag) (0) + +#define TAGINO_UID(cond, uid, tag) (uid) +#define TAGINO_GID(cond, gid, tag) (gid) + +#endif + + +#ifdef CONFIG_TAGGING_GID16 + +#define MAX_UID 0xFFFFFFFF +#define MAX_GID 0x0000FFFF + +#define INOTAG_TAG(cond, uid, gid, tag) \ + ((cond) ? (((gid) >> 16) & 0xFFFF) : 0) + +#define TAGINO_UID(cond, uid, tag) (uid) +#define TAGINO_GID(cond, gid, tag) \ + ((cond) ? (((gid) & 0xFFFF) | ((tag) << 16)) : (gid)) + +#endif + + +#ifdef CONFIG_TAGGING_ID24 + +#define MAX_UID 0x00FFFFFF +#define MAX_GID 0x00FFFFFF + +#define INOTAG_TAG(cond, uid, gid, tag) \ + ((cond) ? ((((uid) >> 16) & 0xFF00) | (((gid) >> 24) & 0xFF)) : 0) + +#define TAGINO_UID(cond, uid, tag) \ + ((cond) ? (((uid) & 0xFFFFFF) | (((tag) & 0xFF00) << 16)) : (uid)) +#define TAGINO_GID(cond, gid, tag) \ + ((cond) ? (((gid) & 0xFFFFFF) | (((tag) & 0x00FF) << 24)) : (gid)) + +#endif + + +#ifdef CONFIG_TAGGING_UID16 + +#define MAX_UID 0x0000FFFF +#define MAX_GID 0xFFFFFFFF + +#define INOTAG_TAG(cond, uid, gid, tag) \ + ((cond) ? (((uid) >> 16) & 0xFFFF) : 0) + +#define TAGINO_UID(cond, uid, tag) \ + ((cond) ? (((uid) & 0xFFFF) | ((tag) << 16)) : (uid)) +#define TAGINO_GID(cond, gid, tag) (gid) + +#endif + + +#ifdef CONFIG_TAGGING_INTERN + +#define MAX_UID 0xFFFFFFFF +#define MAX_GID 0xFFFFFFFF + +#define INOTAG_TAG(cond, uid, gid, tag) \ + ((cond) ? (tag) : 0) + +#define TAGINO_UID(cond, uid, tag) (uid) +#define TAGINO_GID(cond, gid, tag) (gid) + +#endif + + +#ifndef CONFIG_TAGGING_NONE +#define dx_current_fstag(sb) \ + ((sb)->s_flags & MS_TAGGED ? dx_current_tag() : 0) +#else +#define dx_current_fstag(sb) (0) +#endif + +#ifndef CONFIG_TAGGING_INTERN +#define TAGINO_TAG(cond, tag) (0) +#else +#define TAGINO_TAG(cond, tag) ((cond) ? (tag) : 0) +#endif + +#define INOTAG_UID(cond, uid, gid) \ + ((cond) ? ((uid) & MAX_UID) : (uid)) +#define INOTAG_GID(cond, uid, gid) \ + ((cond) ? 
((gid) & MAX_GID) : (gid)) + + +static inline uid_t dx_map_uid(uid_t uid) +{ + if ((uid > MAX_UID) && (uid != -1)) + uid = -2; + return (uid & MAX_UID); +} + +static inline gid_t dx_map_gid(gid_t gid) +{ + if ((gid > MAX_GID) && (gid != -1)) + gid = -2; + return (gid & MAX_GID); +} + +struct peer_tag { + int32_t xid; + int32_t nid; +}; + +#define dx_notagcheck(sb) ((sb) && ((sb)->s_flags & MS_NOTAGCHECK)) + +int dx_parse_tag(char *string, tag_t *tag, int remove, int *mnt_flags, + unsigned long *flags); + +#ifdef CONFIG_PROPAGATE + +void __dx_propagate_tag(struct nameidata *nd, struct inode *inode); + +#define dx_propagate_tag(n, i) __dx_propagate_tag(n, i) + +#else +#define dx_propagate_tag(n, i) do { } while (0) +#endif + +#endif /* _DX_TAG_H */ diff -Nur kernel-2.6.32.54/linux-2.6.32/include/linux/vs_inet6.h kernel-2.6.32.54.vs/linux-2.6.32/include/linux/vs_inet6.h --- kernel-2.6.32.54/linux-2.6.32/include/linux/vs_inet6.h 1970-01-01 01:00:00.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/include/linux/vs_inet6.h 2012-01-16 14:51:21.965408533 +0100 @@ -0,0 +1,246 @@ +#ifndef _VS_INET6_H +#define _VS_INET6_H + +#include "vserver/base.h" +#include "vserver/network.h" +#include "vserver/debug.h" + +#include + +#define NXAV6(a) &(a)->ip, &(a)->mask, (a)->prefix, (a)->type +#define NXAV6_FMT "[%pI6/%pI6/%d:%04x]" + + +#ifdef CONFIG_IPV6 + +static inline +int v6_addr_match(struct nx_addr_v6 *nxa, + const struct in6_addr *addr, uint16_t mask) +{ + int ret = 0; + + switch (nxa->type & mask) { + case NXA_TYPE_MASK: + ret = ipv6_masked_addr_cmp(&nxa->ip, &nxa->mask, addr); + break; + case NXA_TYPE_ADDR: + ret = ipv6_addr_equal(&nxa->ip, addr); + break; + case NXA_TYPE_ANY: + ret = 1; + break; + } + vxdprintk(VXD_CBIT(net, 0), + "v6_addr_match(%p" NXAV6_FMT ",%pI6,%04x) = %d", + nxa, NXAV6(nxa), addr, mask, ret); + return ret; +} + +static inline +int v6_addr_in_nx_info(struct nx_info *nxi, + const struct in6_addr *addr, uint16_t mask) +{ + struct nx_addr_v6 *nxa; + int ret = 1; + + if (!nxi) + goto out; + for (nxa = &nxi->v6; nxa; nxa = nxa->next) + if (v6_addr_match(nxa, addr, mask)) + goto out; + ret = 0; +out: + vxdprintk(VXD_CBIT(net, 0), + "v6_addr_in_nx_info(%p[#%u],%pI6,%04x) = %d", + nxi, nxi ? 
nxi->nx_id : 0, addr, mask, ret); + return ret; +} + +static inline +int v6_nx_addr_match(struct nx_addr_v6 *nxa, struct nx_addr_v6 *addr, uint16_t mask) +{ + /* FIXME: needs full range checks */ + return v6_addr_match(nxa, &addr->ip, mask); +} + +static inline +int v6_nx_addr_in_nx_info(struct nx_info *nxi, struct nx_addr_v6 *nxa, uint16_t mask) +{ + struct nx_addr_v6 *ptr; + + for (ptr = &nxi->v6; ptr; ptr = ptr->next) + if (v6_nx_addr_match(ptr, nxa, mask)) + return 1; + return 0; +} + + +/* + * Check if a given address matches for a socket + * + * nxi: the socket's nx_info if any + * addr: to be verified address + */ +static inline +int v6_sock_addr_match ( + struct nx_info *nxi, + struct inet_sock *inet, + struct in6_addr *addr) +{ + struct sock *sk = &inet->sk; + struct in6_addr *saddr = inet6_rcv_saddr(sk); + + if (!ipv6_addr_any(addr) && + ipv6_addr_equal(saddr, addr)) + return 1; + if (ipv6_addr_any(saddr)) + return v6_addr_in_nx_info(nxi, addr, -1); + return 0; +} + +/* + * check if address is covered by socket + * + * sk: the socket to check against + * addr: the address in question (must be != 0) + */ + +static inline +int __v6_addr_match_socket(const struct sock *sk, struct nx_addr_v6 *nxa) +{ + struct nx_info *nxi = sk->sk_nx_info; + struct in6_addr *saddr = inet6_rcv_saddr(sk); + + vxdprintk(VXD_CBIT(net, 5), + "__v6_addr_in_socket(%p," NXAV6_FMT ") %p:%pI6 %p;%lx", + sk, NXAV6(nxa), nxi, saddr, sk->sk_socket, + (sk->sk_socket?sk->sk_socket->flags:0)); + + if (!ipv6_addr_any(saddr)) { /* direct address match */ + return v6_addr_match(nxa, saddr, -1); + } else if (nxi) { /* match against nx_info */ + return v6_nx_addr_in_nx_info(nxi, nxa, -1); + } else { /* unrestricted any socket */ + return 1; + } +} + + +/* inet related checks and helpers */ + + +struct in_ifaddr; +struct net_device; +struct sock; + + +#include +#include +#include + + +int dev_in_nx_info(struct net_device *, struct nx_info *); +int v6_dev_in_nx_info(struct net_device *, struct nx_info *); +int nx_v6_addr_conflict(struct nx_info *, struct nx_info *); + + + +static inline +int v6_ifa_in_nx_info(struct inet6_ifaddr *ifa, struct nx_info *nxi) +{ + if (!nxi) + return 1; + if (!ifa) + return 0; + return v6_addr_in_nx_info(nxi, &ifa->addr, -1); +} + +static inline +int nx_v6_ifa_visible(struct nx_info *nxi, struct inet6_ifaddr *ifa) +{ + vxdprintk(VXD_CBIT(net, 1), "nx_v6_ifa_visible(%p[#%u],%p) %d", + nxi, nxi ? nxi->nx_id : 0, ifa, + nxi ? 
v6_ifa_in_nx_info(ifa, nxi) : 0); + + if (!nx_info_flags(nxi, NXF_HIDE_NETIF, 0)) + return 1; + if (v6_ifa_in_nx_info(ifa, nxi)) + return 1; + return 0; +} + + +struct nx_v6_sock_addr { + struct in6_addr saddr; /* Address used for validation */ + struct in6_addr baddr; /* Address used for socket bind */ +}; + +static inline +int v6_map_sock_addr(struct inet_sock *inet, struct sockaddr_in6 *addr, + struct nx_v6_sock_addr *nsa) +{ + // struct sock *sk = &inet->sk; + // struct nx_info *nxi = sk->sk_nx_info; + struct in6_addr saddr = addr->sin6_addr; + struct in6_addr baddr = saddr; + + nsa->saddr = saddr; + nsa->baddr = baddr; + return 0; +} + +static inline +void v6_set_sock_addr(struct inet_sock *inet, struct nx_v6_sock_addr *nsa) +{ + // struct sock *sk = &inet->sk; + // struct in6_addr *saddr = inet6_rcv_saddr(sk); + + // *saddr = nsa->baddr; + // inet->saddr = nsa->baddr; +} + +static inline +int nx_info_has_v6(struct nx_info *nxi) +{ + if (!nxi) + return 1; + if (NX_IPV6(nxi)) + return 1; + return 0; +} + +#else /* CONFIG_IPV6 */ + +static inline +int nx_v6_dev_visible(struct nx_info *n, struct net_device *d) +{ + return 1; +} + + +static inline +int nx_v6_addr_conflict(struct nx_info *n, uint32_t a, const struct sock *s) +{ + return 1; +} + +static inline +int v6_ifa_in_nx_info(struct in_ifaddr *a, struct nx_info *n) +{ + return 1; +} + +static inline +int nx_info_has_v6(struct nx_info *nxi) +{ + return 0; +} + +#endif /* CONFIG_IPV6 */ + +#define current_nx_info_has_v6() \ + nx_info_has_v6(current_nx_info()) + +#else +#warning duplicate inclusion +#endif diff -Nur kernel-2.6.32.54/linux-2.6.32/include/linux/vs_inet.h kernel-2.6.32.54.vs/linux-2.6.32/include/linux/vs_inet.h --- kernel-2.6.32.54/linux-2.6.32/include/linux/vs_inet.h 1970-01-01 01:00:00.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/include/linux/vs_inet.h 2012-01-16 14:51:21.961408547 +0100 @@ -0,0 +1,342 @@ +#ifndef _VS_INET_H +#define _VS_INET_H + +#include "vserver/base.h" +#include "vserver/network.h" +#include "vserver/debug.h" + +#define IPI_LOOPBACK htonl(INADDR_LOOPBACK) + +#define NXAV4(a) NIPQUAD((a)->ip[0]), NIPQUAD((a)->ip[1]), \ + NIPQUAD((a)->mask), (a)->type +#define NXAV4_FMT "[" NIPQUAD_FMT "-" NIPQUAD_FMT "/" NIPQUAD_FMT ":%04x]" + + +static inline +int v4_addr_match(struct nx_addr_v4 *nxa, __be32 addr, uint16_t tmask) +{ + __be32 ip = nxa->ip[0].s_addr; + __be32 mask = nxa->mask.s_addr; + __be32 bcast = ip | ~mask; + int ret = 0; + + switch (nxa->type & tmask) { + case NXA_TYPE_MASK: + ret = (ip == (addr & mask)); + break; + case NXA_TYPE_ADDR: + ret = 3; + if (addr == ip) + break; + /* fall through to broadcast */ + case NXA_MOD_BCAST: + ret = ((tmask & NXA_MOD_BCAST) && (addr == bcast)); + break; + case NXA_TYPE_RANGE: + ret = ((nxa->ip[0].s_addr <= addr) && + (nxa->ip[1].s_addr > addr)); + break; + case NXA_TYPE_ANY: + ret = 2; + break; + } + + vxdprintk(VXD_CBIT(net, 0), + "v4_addr_match(%p" NXAV4_FMT "," NIPQUAD_FMT ",%04x) = %d", + nxa, NXAV4(nxa), NIPQUAD(addr), tmask, ret); + return ret; +} + +static inline +int v4_addr_in_nx_info(struct nx_info *nxi, __be32 addr, uint16_t tmask) +{ + struct nx_addr_v4 *nxa; + int ret = 1; + + if (!nxi) + goto out; + + ret = 2; + /* allow 127.0.0.1 when remapping lback */ + if ((tmask & NXA_LOOPBACK) && + (addr == IPI_LOOPBACK) && + nx_info_flags(nxi, NXF_LBACK_REMAP, 0)) + goto out; + ret = 3; + /* check for lback address */ + if ((tmask & NXA_MOD_LBACK) && + (nxi->v4_lback.s_addr == addr)) + goto out; + ret = 4; + /* check for broadcast address */ + if 
((tmask & NXA_MOD_BCAST) && + (nxi->v4_bcast.s_addr == addr)) + goto out; + ret = 5; + /* check for v4 addresses */ + for (nxa = &nxi->v4; nxa; nxa = nxa->next) + if (v4_addr_match(nxa, addr, tmask)) + goto out; + ret = 0; +out: + vxdprintk(VXD_CBIT(net, 0), + "v4_addr_in_nx_info(%p[#%u]," NIPQUAD_FMT ",%04x) = %d", + nxi, nxi ? nxi->nx_id : 0, NIPQUAD(addr), tmask, ret); + return ret; +} + +static inline +int v4_nx_addr_match(struct nx_addr_v4 *nxa, struct nx_addr_v4 *addr, uint16_t mask) +{ + /* FIXME: needs full range checks */ + return v4_addr_match(nxa, addr->ip[0].s_addr, mask); +} + +static inline +int v4_nx_addr_in_nx_info(struct nx_info *nxi, struct nx_addr_v4 *nxa, uint16_t mask) +{ + struct nx_addr_v4 *ptr; + + for (ptr = &nxi->v4; ptr; ptr = ptr->next) + if (v4_nx_addr_match(ptr, nxa, mask)) + return 1; + return 0; +} + +#include + +/* + * Check if a given address matches for a socket + * + * nxi: the socket's nx_info if any + * addr: to be verified address + */ +static inline +int v4_sock_addr_match ( + struct nx_info *nxi, + struct inet_sock *inet, + __be32 addr) +{ + __be32 saddr = inet->rcv_saddr; + __be32 bcast = nxi ? nxi->v4_bcast.s_addr : INADDR_BROADCAST; + + if (addr && (saddr == addr || bcast == addr)) + return 1; + if (!saddr) + return v4_addr_in_nx_info(nxi, addr, NXA_MASK_BIND); + return 0; +} + + +/* inet related checks and helpers */ + + +struct in_ifaddr; +struct net_device; +struct sock; + +#ifdef CONFIG_INET + +#include +#include +#include +#include + + +int dev_in_nx_info(struct net_device *, struct nx_info *); +int v4_dev_in_nx_info(struct net_device *, struct nx_info *); +int nx_v4_addr_conflict(struct nx_info *, struct nx_info *); + + +/* + * check if address is covered by socket + * + * sk: the socket to check against + * addr: the address in question (must be != 0) + */ + +static inline +int __v4_addr_match_socket(const struct sock *sk, struct nx_addr_v4 *nxa) +{ + struct nx_info *nxi = sk->sk_nx_info; + __be32 saddr = inet_rcv_saddr(sk); + + vxdprintk(VXD_CBIT(net, 5), + "__v4_addr_in_socket(%p," NXAV4_FMT ") %p:" NIPQUAD_FMT " %p;%lx", + sk, NXAV4(nxa), nxi, NIPQUAD(saddr), sk->sk_socket, + (sk->sk_socket?sk->sk_socket->flags:0)); + + if (saddr) { /* direct address match */ + return v4_addr_match(nxa, saddr, -1); + } else if (nxi) { /* match against nx_info */ + return v4_nx_addr_in_nx_info(nxi, nxa, -1); + } else { /* unrestricted any socket */ + return 1; + } +} + + + +static inline +int nx_dev_visible(struct nx_info *nxi, struct net_device *dev) +{ + vxdprintk(VXD_CBIT(net, 1), "nx_dev_visible(%p[#%u],%p »%s«) %d", + nxi, nxi ? nxi->nx_id : 0, dev, dev->name, + nxi ? dev_in_nx_info(dev, nxi) : 0); + + if (!nx_info_flags(nxi, NXF_HIDE_NETIF, 0)) + return 1; + if (dev_in_nx_info(dev, nxi)) + return 1; + return 0; +} + + +static inline +int v4_ifa_in_nx_info(struct in_ifaddr *ifa, struct nx_info *nxi) +{ + if (!nxi) + return 1; + if (!ifa) + return 0; + return v4_addr_in_nx_info(nxi, ifa->ifa_local, NXA_MASK_SHOW); +} + +static inline +int nx_v4_ifa_visible(struct nx_info *nxi, struct in_ifaddr *ifa) +{ + vxdprintk(VXD_CBIT(net, 1), "nx_v4_ifa_visible(%p[#%u],%p) %d", + nxi, nxi ? nxi->nx_id : 0, ifa, + nxi ? 
v4_ifa_in_nx_info(ifa, nxi) : 0); + + if (!nx_info_flags(nxi, NXF_HIDE_NETIF, 0)) + return 1; + if (v4_ifa_in_nx_info(ifa, nxi)) + return 1; + return 0; +} + + +struct nx_v4_sock_addr { + __be32 saddr; /* Address used for validation */ + __be32 baddr; /* Address used for socket bind */ +}; + +static inline +int v4_map_sock_addr(struct inet_sock *inet, struct sockaddr_in *addr, + struct nx_v4_sock_addr *nsa) +{ + struct sock *sk = &inet->sk; + struct nx_info *nxi = sk->sk_nx_info; + __be32 saddr = addr->sin_addr.s_addr; + __be32 baddr = saddr; + + vxdprintk(VXD_CBIT(net, 3), + "inet_bind(%p)* %p,%p;%lx " NIPQUAD_FMT, + sk, sk->sk_nx_info, sk->sk_socket, + (sk->sk_socket ? sk->sk_socket->flags : 0), + NIPQUAD(saddr)); + + if (nxi) { + if (saddr == INADDR_ANY) { + if (nx_info_flags(nxi, NXF_SINGLE_IP, 0)) + baddr = nxi->v4.ip[0].s_addr; + } else if (saddr == IPI_LOOPBACK) { + if (nx_info_flags(nxi, NXF_LBACK_REMAP, 0)) + baddr = nxi->v4_lback.s_addr; + } else { /* normal address bind */ + if (!v4_addr_in_nx_info(nxi, saddr, NXA_MASK_BIND)) + return -EADDRNOTAVAIL; + } + } + + vxdprintk(VXD_CBIT(net, 3), + "inet_bind(%p) " NIPQUAD_FMT ", " NIPQUAD_FMT, + sk, NIPQUAD(saddr), NIPQUAD(baddr)); + + nsa->saddr = saddr; + nsa->baddr = baddr; + return 0; +} + +static inline +void v4_set_sock_addr(struct inet_sock *inet, struct nx_v4_sock_addr *nsa) +{ + inet->saddr = nsa->baddr; + inet->rcv_saddr = nsa->baddr; +} + + +/* + * helper to simplify inet_lookup_listener + * + * nxi: the socket's nx_info if any + * addr: to be verified address + * saddr: socket address + */ +static inline int v4_inet_addr_match ( + struct nx_info *nxi, + __be32 addr, + __be32 saddr) +{ + if (addr && (saddr == addr)) + return 1; + if (!saddr) + return nxi ? v4_addr_in_nx_info(nxi, addr, NXA_MASK_BIND) : 1; + return 0; +} + +static inline __be32 nx_map_sock_lback(struct nx_info *nxi, __be32 addr) +{ + if (nx_info_flags(nxi, NXF_HIDE_LBACK, 0) && + (addr == nxi->v4_lback.s_addr)) + return IPI_LOOPBACK; + return addr; +} + +static inline +int nx_info_has_v4(struct nx_info *nxi) +{ + if (!nxi) + return 1; + if (NX_IPV4(nxi)) + return 1; + if (nx_info_flags(nxi, NXF_LBACK_REMAP, 0)) + return 1; + return 0; +} + +#else /* CONFIG_INET */ + +static inline +int nx_dev_visible(struct nx_info *n, struct net_device *d) +{ + return 1; +} + +static inline +int nx_v4_addr_conflict(struct nx_info *n, uint32_t a, const struct sock *s) +{ + return 1; +} + +static inline +int v4_ifa_in_nx_info(struct in_ifaddr *a, struct nx_info *n) +{ + return 1; +} + +static inline +int nx_info_has_v4(struct nx_info *nxi) +{ + return 0; +} + +#endif /* CONFIG_INET */ + +#define current_nx_info_has_v4() \ + nx_info_has_v4(current_nx_info()) + +#else +// #warning duplicate inclusion +#endif diff -Nur kernel-2.6.32.54/linux-2.6.32/include/linux/vs_limit.h kernel-2.6.32.54.vs/linux-2.6.32/include/linux/vs_limit.h --- kernel-2.6.32.54/linux-2.6.32/include/linux/vs_limit.h 1970-01-01 01:00:00.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/include/linux/vs_limit.h 2012-01-16 14:51:21.965408533 +0100 @@ -0,0 +1,140 @@ +#ifndef _VS_LIMIT_H +#define _VS_LIMIT_H + +#include "vserver/limit.h" +#include "vserver/base.h" +#include "vserver/context.h" +#include "vserver/debug.h" +#include "vserver/context.h" +#include "vserver/limit_int.h" + + +#define vx_acc_cres(v, d, p, r) \ + __vx_acc_cres(v, r, d, p, __FILE__, __LINE__) + +#define vx_acc_cres_cond(x, d, p, r) \ + __vx_acc_cres(((x) == vx_current_xid()) ? 
current_vx_info() : 0, \ + r, d, p, __FILE__, __LINE__) + + +#define vx_add_cres(v, a, p, r) \ + __vx_add_cres(v, r, a, p, __FILE__, __LINE__) +#define vx_sub_cres(v, a, p, r) vx_add_cres(v, -(a), p, r) + +#define vx_add_cres_cond(x, a, p, r) \ + __vx_add_cres(((x) == vx_current_xid()) ? current_vx_info() : 0, \ + r, a, p, __FILE__, __LINE__) +#define vx_sub_cres_cond(x, a, p, r) vx_add_cres_cond(x, -(a), p, r) + + +/* process and file limits */ + +#define vx_nproc_inc(p) \ + vx_acc_cres((p)->vx_info, 1, p, RLIMIT_NPROC) + +#define vx_nproc_dec(p) \ + vx_acc_cres((p)->vx_info,-1, p, RLIMIT_NPROC) + +#define vx_files_inc(f) \ + vx_acc_cres_cond((f)->f_xid, 1, f, RLIMIT_NOFILE) + +#define vx_files_dec(f) \ + vx_acc_cres_cond((f)->f_xid,-1, f, RLIMIT_NOFILE) + +#define vx_locks_inc(l) \ + vx_acc_cres_cond((l)->fl_xid, 1, l, RLIMIT_LOCKS) + +#define vx_locks_dec(l) \ + vx_acc_cres_cond((l)->fl_xid,-1, l, RLIMIT_LOCKS) + +#define vx_openfd_inc(f) \ + vx_acc_cres(current_vx_info(), 1, (void *)(long)(f), VLIMIT_OPENFD) + +#define vx_openfd_dec(f) \ + vx_acc_cres(current_vx_info(),-1, (void *)(long)(f), VLIMIT_OPENFD) + + +#define vx_cres_avail(v, n, r) \ + __vx_cres_avail(v, r, n, __FILE__, __LINE__) + + +#define vx_nproc_avail(n) \ + vx_cres_avail(current_vx_info(), n, RLIMIT_NPROC) + +#define vx_files_avail(n) \ + vx_cres_avail(current_vx_info(), n, RLIMIT_NOFILE) + +#define vx_locks_avail(n) \ + vx_cres_avail(current_vx_info(), n, RLIMIT_LOCKS) + +#define vx_openfd_avail(n) \ + vx_cres_avail(current_vx_info(), n, VLIMIT_OPENFD) + + +/* dentry limits */ + +#define vx_dentry_inc(d) do { \ + if (atomic_read(&d->d_count) == 1) \ + vx_acc_cres(current_vx_info(), 1, d, VLIMIT_DENTRY); \ + } while (0) + +#define vx_dentry_dec(d) do { \ + if (atomic_read(&d->d_count) == 0) \ + vx_acc_cres(current_vx_info(),-1, d, VLIMIT_DENTRY); \ + } while (0) + +#define vx_dentry_avail(n) \ + vx_cres_avail(current_vx_info(), n, VLIMIT_DENTRY) + + +/* socket limits */ + +#define vx_sock_inc(s) \ + vx_acc_cres((s)->sk_vx_info, 1, s, VLIMIT_NSOCK) + +#define vx_sock_dec(s) \ + vx_acc_cres((s)->sk_vx_info,-1, s, VLIMIT_NSOCK) + +#define vx_sock_avail(n) \ + vx_cres_avail(current_vx_info(), n, VLIMIT_NSOCK) + + +/* ipc resource limits */ + +#define vx_ipcmsg_add(v, u, a) \ + vx_add_cres(v, a, u, RLIMIT_MSGQUEUE) + +#define vx_ipcmsg_sub(v, u, a) \ + vx_sub_cres(v, a, u, RLIMIT_MSGQUEUE) + +#define vx_ipcmsg_avail(v, a) \ + vx_cres_avail(v, a, RLIMIT_MSGQUEUE) + + +#define vx_ipcshm_add(v, k, a) \ + vx_add_cres(v, a, (void *)(long)(k), VLIMIT_SHMEM) + +#define vx_ipcshm_sub(v, k, a) \ + vx_sub_cres(v, a, (void *)(long)(k), VLIMIT_SHMEM) + +#define vx_ipcshm_avail(v, a) \ + vx_cres_avail(v, a, VLIMIT_SHMEM) + + +#define vx_semary_inc(a) \ + vx_acc_cres(current_vx_info(), 1, a, VLIMIT_SEMARY) + +#define vx_semary_dec(a) \ + vx_acc_cres(current_vx_info(), -1, a, VLIMIT_SEMARY) + + +#define vx_nsems_add(a,n) \ + vx_add_cres(current_vx_info(), n, a, VLIMIT_NSEMS) + +#define vx_nsems_sub(a,n) \ + vx_sub_cres(current_vx_info(), n, a, VLIMIT_NSEMS) + + +#else +#warning duplicate inclusion +#endif diff -Nur kernel-2.6.32.54/linux-2.6.32/include/linux/vs_memory.h kernel-2.6.32.54.vs/linux-2.6.32/include/linux/vs_memory.h --- kernel-2.6.32.54/linux-2.6.32/include/linux/vs_memory.h 1970-01-01 01:00:00.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/include/linux/vs_memory.h 2012-01-16 14:51:21.965408533 +0100 @@ -0,0 +1,159 @@ +#ifndef _VS_MEMORY_H +#define _VS_MEMORY_H + +#include "vserver/limit.h" +#include "vserver/base.h" 
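/*
 * Editor's note, not part of the original patch: the accounting macros
 * of vs_limit.h above are used in matched pairs around the object they
 * account, with an availability check first.  A hedged kernel-side
 * sketch (the surrounding allocation path is hypothetical); the return
 * convention is the one documented for __vx_cres_avail() in
 * limit_int.h: 1 = no limit hit, -1 = over the soft limit only,
 * 0 = over the hard limit.
 *
 *	if (!vx_files_avail(1))    // 0 means the hard limit would be hit
 *		return -ENFILE;
 *	vx_files_inc(file);        // charge the context for the new file
 *	...
 *	vx_files_dec(file);        // uncharge on release
 */ 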
+#include "vserver/context.h" +#include "vserver/debug.h" +#include "vserver/context.h" +#include "vserver/limit_int.h" + + +#define __acc_add_long(a, v) (*(v) += (a)) +#define __acc_inc_long(v) (++*(v)) +#define __acc_dec_long(v) (--*(v)) + +#if NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS +#define __acc_add_atomic(a, v) atomic_long_add(a, v) +#define __acc_inc_atomic(v) atomic_long_inc(v) +#define __acc_dec_atomic(v) atomic_long_dec(v) +#else /* NR_CPUS < CONFIG_SPLIT_PTLOCK_CPUS */ +#define __acc_add_atomic(a, v) __acc_add_long(a, v) +#define __acc_inc_atomic(v) __acc_inc_long(v) +#define __acc_dec_atomic(v) __acc_dec_long(v) +#endif /* NR_CPUS < CONFIG_SPLIT_PTLOCK_CPUS */ + + +#define vx_acc_page(m, d, v, r) do { \ + if ((d) > 0) \ + __acc_inc_long(&(m)->v); \ + else \ + __acc_dec_long(&(m)->v); \ + __vx_acc_cres(m->mm_vx_info, r, d, m, __FILE__, __LINE__); \ +} while (0) + +#define vx_acc_page_atomic(m, d, v, r) do { \ + if ((d) > 0) \ + __acc_inc_atomic(&(m)->v); \ + else \ + __acc_dec_atomic(&(m)->v); \ + __vx_acc_cres(m->mm_vx_info, r, d, m, __FILE__, __LINE__); \ +} while (0) + + +#define vx_acc_pages(m, p, v, r) do { \ + unsigned long __p = (p); \ + __acc_add_long(__p, &(m)->v); \ + __vx_add_cres(m->mm_vx_info, r, __p, m, __FILE__, __LINE__); \ +} while (0) + +#define vx_acc_pages_atomic(m, p, v, r) do { \ + unsigned long __p = (p); \ + __acc_add_atomic(__p, &(m)->v); \ + __vx_add_cres(m->mm_vx_info, r, __p, m, __FILE__, __LINE__); \ +} while (0) + + + +#define vx_acc_vmpage(m, d) \ + vx_acc_page(m, d, total_vm, RLIMIT_AS) +#define vx_acc_vmlpage(m, d) \ + vx_acc_page(m, d, locked_vm, RLIMIT_MEMLOCK) +#define vx_acc_file_rsspage(m, d) \ + vx_acc_page_atomic(m, d, _file_rss, VLIMIT_MAPPED) +#define vx_acc_anon_rsspage(m, d) \ + vx_acc_page_atomic(m, d, _anon_rss, VLIMIT_ANON) + +#define vx_acc_vmpages(m, p) \ + vx_acc_pages(m, p, total_vm, RLIMIT_AS) +#define vx_acc_vmlpages(m, p) \ + vx_acc_pages(m, p, locked_vm, RLIMIT_MEMLOCK) +#define vx_acc_file_rsspages(m, p) \ + vx_acc_pages_atomic(m, p, _file_rss, VLIMIT_MAPPED) +#define vx_acc_anon_rsspages(m, p) \ + vx_acc_pages_atomic(m, p, _anon_rss, VLIMIT_ANON) + +#define vx_pages_add(s, r, p) __vx_add_cres(s, r, p, 0, __FILE__, __LINE__) +#define vx_pages_sub(s, r, p) vx_pages_add(s, r, -(p)) + +#define vx_vmpages_inc(m) vx_acc_vmpage(m, 1) +#define vx_vmpages_dec(m) vx_acc_vmpage(m, -1) +#define vx_vmpages_add(m, p) vx_acc_vmpages(m, p) +#define vx_vmpages_sub(m, p) vx_acc_vmpages(m, -(p)) + +#define vx_vmlocked_inc(m) vx_acc_vmlpage(m, 1) +#define vx_vmlocked_dec(m) vx_acc_vmlpage(m, -1) +#define vx_vmlocked_add(m, p) vx_acc_vmlpages(m, p) +#define vx_vmlocked_sub(m, p) vx_acc_vmlpages(m, -(p)) + +#define vx_file_rsspages_inc(m) vx_acc_file_rsspage(m, 1) +#define vx_file_rsspages_dec(m) vx_acc_file_rsspage(m, -1) +#define vx_file_rsspages_add(m, p) vx_acc_file_rsspages(m, p) +#define vx_file_rsspages_sub(m, p) vx_acc_file_rsspages(m, -(p)) + +#define vx_anon_rsspages_inc(m) vx_acc_anon_rsspage(m, 1) +#define vx_anon_rsspages_dec(m) vx_acc_anon_rsspage(m, -1) +#define vx_anon_rsspages_add(m, p) vx_acc_anon_rsspages(m, p) +#define vx_anon_rsspages_sub(m, p) vx_acc_anon_rsspages(m, -(p)) + + +#define vx_pages_avail(m, p, r) \ + __vx_cres_avail((m)->mm_vx_info, r, p, __FILE__, __LINE__) + +#define vx_vmpages_avail(m, p) vx_pages_avail(m, p, RLIMIT_AS) +#define vx_vmlocked_avail(m, p) vx_pages_avail(m, p, RLIMIT_MEMLOCK) +#define vx_anon_avail(m, p) vx_pages_avail(m, p, VLIMIT_ANON) +#define vx_mapped_avail(m, p) vx_pages_avail(m, p, 
VLIMIT_MAPPED) + +#define vx_rss_avail(m, p) \ + __vx_cres_array_avail((m)->mm_vx_info, VLA_RSS, p, __FILE__, __LINE__) + + +enum { + VXPT_UNKNOWN = 0, + VXPT_ANON, + VXPT_NONE, + VXPT_FILE, + VXPT_SWAP, + VXPT_WRITE +}; + +#if 0 +#define vx_page_fault(mm, vma, type, ret) +#else + +static inline +void __vx_page_fault(struct mm_struct *mm, + struct vm_area_struct *vma, int type, int ret) +{ + struct vx_info *vxi = mm->mm_vx_info; + int what; +/* + static char *page_type[6] = + { "UNKNOWN", "ANON", "NONE", "FILE", "SWAP", "WRITE" }; + static char *page_what[4] = + { "FAULT_OOM", "FAULT_SIGBUS", "FAULT_MINOR", "FAULT_MAJOR" }; +*/ + + if (!vxi) + return; + + what = (ret & 0x3); + +/* printk("[%d] page[%d][%d] %2x %s %s\n", vxi->vx_id, + type, what, ret, page_type[type], page_what[what]); +*/ + if (ret & VM_FAULT_WRITE) + what |= 0x4; + atomic_inc(&vxi->cacct.page[type][what]); +} + +#define vx_page_fault(mm, vma, type, ret) __vx_page_fault(mm, vma, type, ret) +#endif + + +extern unsigned long vx_badness(struct task_struct *task, struct mm_struct *mm); + +#else +#warning duplicate inclusion +#endif diff -Nur kernel-2.6.32.54/linux-2.6.32/include/linux/vs_network.h kernel-2.6.32.54.vs/linux-2.6.32/include/linux/vs_network.h --- kernel-2.6.32.54/linux-2.6.32/include/linux/vs_network.h 1970-01-01 01:00:00.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/include/linux/vs_network.h 2012-01-16 14:51:21.965408533 +0100 @@ -0,0 +1,169 @@ +#ifndef _NX_VS_NETWORK_H +#define _NX_VS_NETWORK_H + +#include "vserver/context.h" +#include "vserver/network.h" +#include "vserver/base.h" +#include "vserver/check.h" +#include "vserver/debug.h" + +#include + + +#define get_nx_info(i) __get_nx_info(i, __FILE__, __LINE__) + +static inline struct nx_info *__get_nx_info(struct nx_info *nxi, + const char *_file, int _line) +{ + if (!nxi) + return NULL; + + vxlprintk(VXD_CBIT(nid, 2), "get_nx_info(%p[#%d.%d])", + nxi, nxi ? nxi->nx_id : 0, + nxi ? atomic_read(&nxi->nx_usecnt) : 0, + _file, _line); + + atomic_inc(&nxi->nx_usecnt); + return nxi; +} + + +extern void free_nx_info(struct nx_info *); + +#define put_nx_info(i) __put_nx_info(i, __FILE__, __LINE__) + +static inline void __put_nx_info(struct nx_info *nxi, const char *_file, int _line) +{ + if (!nxi) + return; + + vxlprintk(VXD_CBIT(nid, 2), "put_nx_info(%p[#%d.%d])", + nxi, nxi ? nxi->nx_id : 0, + nxi ? atomic_read(&nxi->nx_usecnt) : 0, + _file, _line); + + if (atomic_dec_and_test(&nxi->nx_usecnt)) + free_nx_info(nxi); +} + + +#define init_nx_info(p, i) __init_nx_info(p, i, __FILE__, __LINE__) + +static inline void __init_nx_info(struct nx_info **nxp, struct nx_info *nxi, + const char *_file, int _line) +{ + if (nxi) { + vxlprintk(VXD_CBIT(nid, 3), + "init_nx_info(%p[#%d.%d])", + nxi, nxi ? nxi->nx_id : 0, + nxi ? atomic_read(&nxi->nx_usecnt) : 0, + _file, _line); + + atomic_inc(&nxi->nx_usecnt); + } + *nxp = nxi; +} + + +#define set_nx_info(p, i) __set_nx_info(p, i, __FILE__, __LINE__) + +static inline void __set_nx_info(struct nx_info **nxp, struct nx_info *nxi, + const char *_file, int _line) +{ + struct nx_info *nxo; + + if (!nxi) + return; + + vxlprintk(VXD_CBIT(nid, 3), "set_nx_info(%p[#%d.%d])", + nxi, nxi ? nxi->nx_id : 0, + nxi ? 
diff -Nur kernel-2.6.32.54/linux-2.6.32/include/linux/vs_network.h kernel-2.6.32.54.vs/linux-2.6.32/include/linux/vs_network.h
--- kernel-2.6.32.54/linux-2.6.32/include/linux/vs_network.h	1970-01-01 01:00:00.000000000 +0100
+++ kernel-2.6.32.54.vs/linux-2.6.32/include/linux/vs_network.h	2012-01-16 14:51:21.965408533 +0100
@@ -0,0 +1,169 @@
+#ifndef _NX_VS_NETWORK_H
+#define _NX_VS_NETWORK_H
+
+#include "vserver/context.h"
+#include "vserver/network.h"
+#include "vserver/base.h"
+#include "vserver/check.h"
+#include "vserver/debug.h"
+
+#include
+
+
+#define get_nx_info(i) __get_nx_info(i, __FILE__, __LINE__)
+
+static inline struct nx_info *__get_nx_info(struct nx_info *nxi,
+	const char *_file, int _line)
+{
+	if (!nxi)
+		return NULL;
+
+	vxlprintk(VXD_CBIT(nid, 2), "get_nx_info(%p[#%d.%d])",
+		nxi, nxi ? nxi->nx_id : 0,
+		nxi ? atomic_read(&nxi->nx_usecnt) : 0,
+		_file, _line);
+
+	atomic_inc(&nxi->nx_usecnt);
+	return nxi;
+}
+
+
+extern void free_nx_info(struct nx_info *);
+
+#define put_nx_info(i) __put_nx_info(i, __FILE__, __LINE__)
+
+static inline void __put_nx_info(struct nx_info *nxi, const char *_file, int _line)
+{
+	if (!nxi)
+		return;
+
+	vxlprintk(VXD_CBIT(nid, 2), "put_nx_info(%p[#%d.%d])",
+		nxi, nxi ? nxi->nx_id : 0,
+		nxi ? atomic_read(&nxi->nx_usecnt) : 0,
+		_file, _line);
+
+	if (atomic_dec_and_test(&nxi->nx_usecnt))
+		free_nx_info(nxi);
+}
+
+
+#define init_nx_info(p, i) __init_nx_info(p, i, __FILE__, __LINE__)
+
+static inline void __init_nx_info(struct nx_info **nxp, struct nx_info *nxi,
+	const char *_file, int _line)
+{
+	if (nxi) {
+		vxlprintk(VXD_CBIT(nid, 3),
+			"init_nx_info(%p[#%d.%d])",
+			nxi, nxi ? nxi->nx_id : 0,
+			nxi ? atomic_read(&nxi->nx_usecnt) : 0,
+			_file, _line);
+
+		atomic_inc(&nxi->nx_usecnt);
+	}
+	*nxp = nxi;
+}
+
+
+#define set_nx_info(p, i) __set_nx_info(p, i, __FILE__, __LINE__)
+
+static inline void __set_nx_info(struct nx_info **nxp, struct nx_info *nxi,
+	const char *_file, int _line)
+{
+	struct nx_info *nxo;
+
+	if (!nxi)
+		return;
+
+	vxlprintk(VXD_CBIT(nid, 3), "set_nx_info(%p[#%d.%d])",
+		nxi, nxi ? nxi->nx_id : 0,
+		nxi ? atomic_read(&nxi->nx_usecnt) : 0,
+		_file, _line);
+
+	atomic_inc(&nxi->nx_usecnt);
+	nxo = xchg(nxp, nxi);
+	BUG_ON(nxo);
+}
+
+#define clr_nx_info(p) __clr_nx_info(p, __FILE__, __LINE__)
+
+static inline void __clr_nx_info(struct nx_info **nxp,
+	const char *_file, int _line)
+{
+	struct nx_info *nxo;
+
+	nxo = xchg(nxp, NULL);
+	if (!nxo)
+		return;
+
+	vxlprintk(VXD_CBIT(nid, 3), "clr_nx_info(%p[#%d.%d])",
+		nxo, nxo ? nxo->nx_id : 0,
+		nxo ? atomic_read(&nxo->nx_usecnt) : 0,
+		_file, _line);
+
+	if (atomic_dec_and_test(&nxo->nx_usecnt))
+		free_nx_info(nxo);
+}
+
+
+#define claim_nx_info(v, p) __claim_nx_info(v, p, __FILE__, __LINE__)
+
+static inline void __claim_nx_info(struct nx_info *nxi,
+	struct task_struct *task, const char *_file, int _line)
+{
+	vxlprintk(VXD_CBIT(nid, 3), "claim_nx_info(%p[#%d.%d.%d]) %p",
+		nxi, nxi ? nxi->nx_id : 0,
+		nxi ? atomic_read(&nxi->nx_usecnt) : 0,
+		nxi ? atomic_read(&nxi->nx_tasks) : 0,
+		task, _file, _line);
+
+	atomic_inc(&nxi->nx_tasks);
+}
+
+
+extern void unhash_nx_info(struct nx_info *);
+
+#define release_nx_info(v, p) __release_nx_info(v, p, __FILE__, __LINE__)
+
+static inline void __release_nx_info(struct nx_info *nxi,
+	struct task_struct *task, const char *_file, int _line)
+{
+	vxlprintk(VXD_CBIT(nid, 3), "release_nx_info(%p[#%d.%d.%d]) %p",
+		nxi, nxi ? nxi->nx_id : 0,
+		nxi ? atomic_read(&nxi->nx_usecnt) : 0,
+		nxi ? atomic_read(&nxi->nx_tasks) : 0,
+		task, _file, _line);
+
+	might_sleep();
+
+	if (atomic_dec_and_test(&nxi->nx_tasks))
+		unhash_nx_info(nxi);
+}
+
+
+#define task_get_nx_info(i) __task_get_nx_info(i, __FILE__, __LINE__)
+
+static __inline__ struct nx_info *__task_get_nx_info(struct task_struct *p,
+	const char *_file, int _line)
+{
+	struct nx_info *nxi;
+
+	task_lock(p);
+	vxlprintk(VXD_CBIT(nid, 5), "task_get_nx_info(%p)",
+		p, _file, _line);
+	nxi = __get_nx_info(p->nx_info, _file, _line);
+	task_unlock(p);
+	return nxi;
+}
+
+
+static inline void exit_nx_info(struct task_struct *p)
+{
+	if (p->nx_info)
+		release_nx_info(p->nx_info, p);
+}
+
+
+#else
+#warning duplicate inclusion
+#endif
diff -Nur kernel-2.6.32.54/linux-2.6.32/include/linux/vs_pid.h kernel-2.6.32.54.vs/linux-2.6.32/include/linux/vs_pid.h
--- kernel-2.6.32.54/linux-2.6.32/include/linux/vs_pid.h	1970-01-01 01:00:00.000000000 +0100
+++ kernel-2.6.32.54.vs/linux-2.6.32/include/linux/vs_pid.h	2012-01-16 14:51:21.965408533 +0100
@@ -0,0 +1,50 @@
+#ifndef _VS_PID_H
+#define _VS_PID_H
+
+#include "vserver/base.h"
+#include "vserver/check.h"
+#include "vserver/context.h"
+#include "vserver/debug.h"
+#include "vserver/pid.h"
+#include
+
+
+#define VXF_FAKE_INIT	(VXF_INFO_INIT | VXF_STATE_INIT)
+
+static inline
+int vx_proc_task_visible(struct task_struct *task)
+{
+	if ((task->pid == 1) &&
+		!vx_flags(VXF_FAKE_INIT, VXF_FAKE_INIT))
+		/* show a blend through init */
+		goto visible;
+	if (vx_check(vx_task_xid(task), VS_WATCH | VS_IDENT))
+		goto visible;
+	return 0;
+visible:
+	return 1;
+}
+
+#define find_task_by_real_pid(pid) find_task_by_pid_ns(pid, &init_pid_ns)
+
+
+static inline
+struct task_struct *vx_get_proc_task(struct inode *inode, struct pid *pid)
+{
+	struct task_struct *task = get_pid_task(pid, PIDTYPE_PID);
+
+	if (task && !vx_proc_task_visible(task)) {
+		vxdprintk(VXD_CBIT(misc, 6),
+			"dropping task (get) %p[#%u,%u] for %p[#%u,%u]",
+			task, task->xid, task->pid,
+			current, current->xid, current->pid);
+		put_task_struct(task);
+		task = NULL;
+	}
+	return task;
+}
+
+
+#else
+#warning duplicate inclusion
+#endif
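
[Editor's note: illustrative usage sketch, not part of the patch. The nx_info helpers in vs_network.h above follow a strict acquire/release discipline: whatever takes a reference (get_nx_info, task_get_nx_info) must be balanced by put_nx_info on every path, just as vx_get_proc_task() in vs_pid.h balances get_pid_task() with put_task_struct(). A hypothetical caller:]

static void demo_report_net_context(struct task_struct *tsk)
{
	struct nx_info *nxi = task_get_nx_info(tsk);	/* + nx_usecnt */

	if (!nxi)
		return;		/* task is not in a network context */
	/* ... safe to use nxi->nx_id while the reference is held ... */
	put_nx_info(nxi);	/* - nx_usecnt, frees on the last drop */
}
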
diff -Nur kernel-2.6.32.54/linux-2.6.32/include/linux/vs_sched.h kernel-2.6.32.54.vs/linux-2.6.32/include/linux/vs_sched.h
--- kernel-2.6.32.54/linux-2.6.32/include/linux/vs_sched.h	1970-01-01 01:00:00.000000000 +0100
+++ kernel-2.6.32.54.vs/linux-2.6.32/include/linux/vs_sched.h	2012-01-16 14:51:21.965408533 +0100
@@ -0,0 +1,110 @@
+#ifndef _VS_SCHED_H
+#define _VS_SCHED_H
+
+#include "vserver/base.h"
+#include "vserver/context.h"
+#include "vserver/sched.h"
+
+
+#define VAVAVOOM_RATIO		50
+
+#define MAX_PRIO_BIAS		20
+#define MIN_PRIO_BIAS		-20
+
+
+#ifdef CONFIG_VSERVER_HARDCPU
+
+/*
+ * effective_prio - return the priority that is based on the static
+ * priority but is modified by bonuses/penalties.
+ *
+ * We scale the actual sleep average [0 .... MAX_SLEEP_AVG]
+ * into a -4 ... 0 ... +4 bonus/penalty range.
+ *
+ * Additionally, we scale another amount based on the number of
+ * CPU tokens currently held by the context, if the process is
+ * part of a context (and the appropriate SCHED flag is set).
+ * This ranges from -5 ... 0 ... +15, quadratically.
+ *
+ * So, the total bonus is -9 .. 0 .. +19.
+ * We use ~50% of the full 0...39 priority range so that:
+ *
+ * 1) nice +19 interactive tasks do not preempt nice 0 CPU hogs.
+ * 2) nice -20 CPU hogs do not get preempted by nice 0 tasks,
+ *    unless that context is far exceeding its CPU allocation.
+ *
+ * Both properties are important to certain workloads.
+ */
+static inline
+int vx_effective_vavavoom(struct _vx_sched_pc *sched_pc, int max_prio)
+{
+	int vavavoom, max;
+
+	/* lots of tokens = lots of vavavoom
+	 * no tokens = no vavavoom	*/
+	if ((vavavoom = sched_pc->tokens) >= 0) {
+		max = sched_pc->tokens_max;
+		vavavoom = max - vavavoom;
+		max = max * max;
+		vavavoom = max_prio * VAVAVOOM_RATIO / 100
+			* (vavavoom*vavavoom - (max >> 2)) / max;
+		return vavavoom;
+	}
+	return 0;
+}
+
+
+static inline
+int vx_adjust_prio(struct task_struct *p, int prio, int max_user)
+{
+	struct vx_info *vxi = p->vx_info;
+	struct _vx_sched_pc *sched_pc;
+
+	if (!vxi)
+		return prio;
+
+	sched_pc = &vx_cpu(vxi, sched_pc);
+	if (vx_info_flags(vxi, VXF_SCHED_PRIO, 0)) {
+		int vavavoom = vx_effective_vavavoom(sched_pc, max_user);
+
+		sched_pc->vavavoom = vavavoom;
+		prio += vavavoom;
+	}
+	prio += sched_pc->prio_bias;
+	return prio;
+}
+
+#else /* !CONFIG_VSERVER_HARDCPU */
+
+static inline
+int vx_adjust_prio(struct task_struct *p, int prio, int max_user)
+{
+	struct vx_info *vxi = p->vx_info;
+
+	if (vxi)
+		prio += vx_cpu(vxi, sched_pc).prio_bias;
+	return prio;
+}
+
+#endif /* CONFIG_VSERVER_HARDCPU */
+
+
+static inline void vx_account_user(struct vx_info *vxi,
+	cputime_t cputime, int nice)
+{
+	if (!vxi)
+		return;
+	vx_cpu(vxi, sched_pc).user_ticks += cputime;
+}
+
+static inline void vx_account_system(struct vx_info *vxi,
+	cputime_t cputime, int idle)
+{
+	if (!vxi)
+		return;
+	vx_cpu(vxi, sched_pc).sys_ticks += cputime;
+}
+
+#else
+#warning duplicate inclusion
+#endif
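
[Editor's note: illustrative arithmetic, not part of the patch. Plugging numbers into vx_effective_vavavoom() above confirms the quadratic -5 ... 0 ... +15 range claimed in its comment; the values max_prio = 40 and tokens_max = 100 are assumptions made only for this example.]

/* v = tokens_max - tokens, max = tokens_max * tokens_max:
 *	bonus = max_prio * VAVAVOOM_RATIO / 100 * (v*v - max/4) / max
 *
 * tokens = 100 (full bucket):	v = 0
 *	bonus = 20 * (0     - 2500) / 10000 = -5   (priority boost)
 * tokens = 50 (half bucket):	v = 50
 *	bonus = 20 * (2500  - 2500) / 10000 =  0
 * tokens = 0 (empty bucket):	v = 100
 *	bonus = 20 * (10000 - 2500) / 10000 = +15  (priority penalty)
 *
 * Lower prio values mean higher priority, so the negative bonus favors
 * contexts that still hold tokens over those that used theirs up.
 */
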
diff -Nur kernel-2.6.32.54/linux-2.6.32/include/linux/vs_socket.h kernel-2.6.32.54.vs/linux-2.6.32/include/linux/vs_socket.h
--- kernel-2.6.32.54/linux-2.6.32/include/linux/vs_socket.h	1970-01-01 01:00:00.000000000 +0100
+++ kernel-2.6.32.54.vs/linux-2.6.32/include/linux/vs_socket.h	2012-01-16 14:51:21.965408533 +0100
@@ -0,0 +1,67 @@
+#ifndef _VS_SOCKET_H
+#define _VS_SOCKET_H
+
+#include "vserver/debug.h"
+#include "vserver/base.h"
+#include "vserver/cacct.h"
+#include "vserver/context.h"
+#include "vserver/tag.h"
+
+
+/* socket accounting */
+
+#include
+
+static inline int vx_sock_type(int family)
+{
+	switch (family) {
+	case PF_UNSPEC:
+		return VXA_SOCK_UNSPEC;
+	case PF_UNIX:
+		return VXA_SOCK_UNIX;
+	case PF_INET:
+		return VXA_SOCK_INET;
+	case PF_INET6:
+		return VXA_SOCK_INET6;
+	case PF_PACKET:
+		return VXA_SOCK_PACKET;
+	default:
+		return VXA_SOCK_OTHER;
+	}
+}
+
+#define vx_acc_sock(v, f, p, s) \
+	__vx_acc_sock(v, f, p, s, __FILE__, __LINE__)
+
+static inline void __vx_acc_sock(struct vx_info *vxi,
+	int family, int pos, int size, char *file, int line)
+{
+	if (vxi) {
+		int type = vx_sock_type(family);
+
+		atomic_long_inc(&vxi->cacct.sock[type][pos].count);
+		atomic_long_add(size, &vxi->cacct.sock[type][pos].total);
+	}
+}
+
+#define vx_sock_recv(sk, s) \
+	vx_acc_sock((sk)->sk_vx_info, (sk)->sk_family, 0, s)
+#define vx_sock_send(sk, s) \
+	vx_acc_sock((sk)->sk_vx_info, (sk)->sk_family, 1, s)
+#define vx_sock_fail(sk, s) \
+	vx_acc_sock((sk)->sk_vx_info, (sk)->sk_family, 2, s)
+
+
+#define sock_vx_init(s) do {		\
+	(s)->sk_xid = 0;		\
+	(s)->sk_vx_info = NULL;		\
+	} while (0)
+
+#define sock_nx_init(s) do {		\
+	(s)->sk_nid = 0;		\
+	(s)->sk_nx_info = NULL;		\
+	} while (0)
+
+#else
+#warning duplicate inclusion
+#endif
diff -Nur kernel-2.6.32.54/linux-2.6.32/include/linux/vs_tag.h kernel-2.6.32.54.vs/linux-2.6.32/include/linux/vs_tag.h
--- kernel-2.6.32.54/linux-2.6.32/include/linux/vs_tag.h	1970-01-01 01:00:00.000000000 +0100
+++ kernel-2.6.32.54.vs/linux-2.6.32/include/linux/vs_tag.h	2012-01-16 14:51:21.965408533 +0100
@@ -0,0 +1,47 @@
+#ifndef _VS_TAG_H
+#define _VS_TAG_H
+
+#include
+
+/* check conditions */
+
+#define DX_ADMIN	0x0001
+#define DX_WATCH	0x0002
+#define DX_HOSTID	0x0008
+
+#define DX_IDENT	0x0010
+
+#define DX_ARG_MASK	0x0010
+
+
+#define dx_task_tag(t)	((t)->tag)
+
+#define dx_current_tag() dx_task_tag(current)
+
+#define dx_check(c, m)	__dx_check(dx_current_tag(), c, m)
+
+#define dx_weak_check(c, m)	((m) ?
dx_check(c, m) : 1) + + +/* + * check current context for ADMIN/WATCH and + * optionally against supplied argument + */ +static inline int __dx_check(tag_t cid, tag_t id, unsigned int mode) +{ + if (mode & DX_ARG_MASK) { + if ((mode & DX_IDENT) && (id == cid)) + return 1; + } + return (((mode & DX_ADMIN) && (cid == 0)) || + ((mode & DX_WATCH) && (cid == 1)) || + ((mode & DX_HOSTID) && (id == 0))); +} + +struct inode; +int dx_permission(const struct inode *inode, int mask); + + +#else +#warning duplicate inclusion +#endif diff -Nur kernel-2.6.32.54/linux-2.6.32/include/linux/vs_time.h kernel-2.6.32.54.vs/linux-2.6.32/include/linux/vs_time.h --- kernel-2.6.32.54/linux-2.6.32/include/linux/vs_time.h 1970-01-01 01:00:00.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/include/linux/vs_time.h 2012-01-16 14:51:21.965408533 +0100 @@ -0,0 +1,19 @@ +#ifndef _VS_TIME_H +#define _VS_TIME_H + + +/* time faking stuff */ + +#ifdef CONFIG_VSERVER_VTIME + +extern void vx_gettimeofday(struct timeval *tv); +extern int vx_settimeofday(struct timespec *ts); + +#else +#define vx_gettimeofday(t) do_gettimeofday(t) +#define vx_settimeofday(t) do_settimeofday(t) +#endif + +#else +#warning duplicate inclusion +#endif diff -Nur kernel-2.6.32.54/linux-2.6.32/include/net/addrconf.h kernel-2.6.32.54.vs/linux-2.6.32/include/net/addrconf.h --- kernel-2.6.32.54/linux-2.6.32/include/net/addrconf.h 2009-12-03 04:51:21.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/include/net/addrconf.h 2012-01-16 14:51:21.993408435 +0100 @@ -84,7 +84,8 @@ struct net_device *dev, const struct in6_addr *daddr, unsigned int srcprefs, - struct in6_addr *saddr); + struct in6_addr *saddr, + struct nx_info *nxi); extern int ipv6_get_lladdr(struct net_device *dev, struct in6_addr *addr, unsigned char banned_flags); diff -Nur kernel-2.6.32.54/linux-2.6.32/include/net/af_unix.h kernel-2.6.32.54.vs/linux-2.6.32/include/net/af_unix.h --- kernel-2.6.32.54/linux-2.6.32/include/net/af_unix.h 2012-01-16 15:01:39.872725575 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/include/net/af_unix.h 2012-01-16 14:51:21.993408435 +0100 @@ -4,6 +4,7 @@ #include #include #include +#include #include extern void unix_inflight(struct file *fp); diff -Nur kernel-2.6.32.54/linux-2.6.32/include/net/inet_timewait_sock.h kernel-2.6.32.54.vs/linux-2.6.32/include/net/inet_timewait_sock.h --- kernel-2.6.32.54/linux-2.6.32/include/net/inet_timewait_sock.h 2009-12-03 04:51:21.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/include/net/inet_timewait_sock.h 2012-01-16 14:51:21.993408435 +0100 @@ -117,6 +117,10 @@ #define tw_hash __tw_common.skc_hash #define tw_prot __tw_common.skc_prot #define tw_net __tw_common.skc_net +#define tw_xid __tw_common.skc_xid +#define tw_vx_info __tw_common.skc_vx_info +#define tw_nid __tw_common.skc_nid +#define tw_nx_info __tw_common.skc_nx_info int tw_timeout; volatile unsigned char tw_substate; /* 3 bits hole, try to pack */ diff -Nur kernel-2.6.32.54/linux-2.6.32/include/net/route.h kernel-2.6.32.54.vs/linux-2.6.32/include/net/route.h --- kernel-2.6.32.54/linux-2.6.32/include/net/route.h 2009-12-03 04:51:21.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/include/net/route.h 2012-01-16 14:51:21.993408435 +0100 @@ -135,6 +135,9 @@ dst_release(&rt->u.dst); } +#include +#include + #define IPTOS_RT_MASK (IPTOS_TOS_MASK & ~3) extern const __u8 ip_tos2prio[16]; @@ -144,6 +147,9 @@ return ip_tos2prio[IPTOS_TOS(tos)>>1]; } +extern int ip_v4_find_src(struct net *net, struct nx_info *, + struct rtable **, struct flowi *); + static inline int 
ip_route_connect(struct rtable **rp, __be32 dst, __be32 src, u32 tos, int oif, u8 protocol, __be16 sport, __be16 dport, struct sock *sk, @@ -161,11 +167,24 @@ int err; struct net *net = sock_net(sk); + struct nx_info *nx_info = current_nx_info(); if (inet_sk(sk)->transparent) fl.flags |= FLOWI_FLAG_ANYSRC; - if (!dst || !src) { + if (sk) + nx_info = sk->sk_nx_info; + + vxdprintk(VXD_CBIT(net, 4), + "ip_route_connect(%p) %p,%p;%lx", + sk, nx_info, sk->sk_socket, + (sk->sk_socket?sk->sk_socket->flags:0)); + + err = ip_v4_find_src(net, nx_info, rp, &fl); + if (err) + return err; + + if (!fl.fl4_dst || !fl.fl4_src) { err = __ip_route_output_key(net, rp, &fl); if (err) return err; diff -Nur kernel-2.6.32.54/linux-2.6.32/include/net/sock.h kernel-2.6.32.54.vs/linux-2.6.32/include/net/sock.h --- kernel-2.6.32.54/linux-2.6.32/include/net/sock.h 2009-12-03 04:51:21.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/include/net/sock.h 2012-01-16 14:51:21.993408435 +0100 @@ -139,6 +139,10 @@ #ifdef CONFIG_NET_NS struct net *skc_net; #endif + xid_t skc_xid; + struct vx_info *skc_vx_info; + nid_t skc_nid; + struct nx_info *skc_nx_info; }; /** @@ -225,6 +229,10 @@ #define sk_bind_node __sk_common.skc_bind_node #define sk_prot __sk_common.skc_prot #define sk_net __sk_common.skc_net +#define sk_xid __sk_common.skc_xid +#define sk_vx_info __sk_common.skc_vx_info +#define sk_nid __sk_common.skc_nid +#define sk_nx_info __sk_common.skc_nx_info kmemcheck_bitfield_begin(flags); unsigned int sk_shutdown : 2, sk_no_check : 2, diff -Nur kernel-2.6.32.54/linux-2.6.32/init/Kconfig kernel-2.6.32.54.vs/linux-2.6.32/init/Kconfig --- kernel-2.6.32.54/linux-2.6.32/init/Kconfig 2012-01-16 15:01:39.884725533 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/init/Kconfig 2012-01-16 14:51:21.997408421 +0100 @@ -426,6 +426,19 @@ config HAVE_UNSTABLE_SCHED_CLOCK bool +config CFS_HARD_LIMITS + bool "Hard Limits for CFS Group Scheduler" + depends on EXPERIMENTAL + depends on FAIR_GROUP_SCHED && CGROUP_SCHED + default n + help + This option enables hard limiting of CPU time obtained by + a fair task group. Use this if you want to throttle a group of tasks + based on its CPU usage. For more details refer to + Documentation/scheduler/sched-cfs-hard-limits.txt + + Say N if unsure. 
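
[Editor's note: illustrative sketch, not part of the patch. Conceptually, the hard limit selected by CFS_HARD_LIMITS is a per-period budget: once a group has consumed cfs_runtime_us worth of CPU time inside the current cfs_period_us window it is throttled, and the budget is refreshed when the next period begins. The demo_* names below are hypothetical stand-ins for the scheduler's internal bandwidth state.]

struct demo_cfs_bandwidth {
	unsigned long long period_us;	/* enforcement window		*/
	unsigned long long runtime_us;	/* budget (quota) per window	*/
	unsigned long long used_us;	/* runtime consumed this window	*/
};

/* nonzero when the group must be throttled until the next refresh */
static int demo_cfs_over_limit(const struct demo_cfs_bandwidth *b)
{
	return b->used_us >= b->runtime_us;
}

/* called at each period boundary: refresh the budget, unthrottle */
static void demo_cfs_refresh(struct demo_cfs_bandwidth *b)
{
	b->used_us = 0;
}
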
+ menuconfig CGROUPS boolean "Control Group support" help diff -Nur kernel-2.6.32.54/linux-2.6.32/init/main.c kernel-2.6.32.54.vs/linux-2.6.32/init/main.c --- kernel-2.6.32.54/linux-2.6.32/init/main.c 2012-01-16 15:01:40.284724117 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/init/main.c 2012-01-16 14:51:21.997408421 +0100 @@ -70,6 +70,7 @@ #include #include #include +#include #include #include diff -Nur kernel-2.6.32.54/linux-2.6.32/ipc/mqueue.c kernel-2.6.32.54.vs/linux-2.6.32/ipc/mqueue.c --- kernel-2.6.32.54/linux-2.6.32/ipc/mqueue.c 2012-01-16 15:01:39.884725533 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/ipc/mqueue.c 2012-01-16 14:51:21.997408421 +0100 @@ -33,6 +33,8 @@ #include #include #include +#include +#include #include #include "util.h" @@ -66,6 +68,7 @@ struct sigevent notify; struct pid* notify_owner; struct user_struct *user; /* user who created, for accounting */ + struct vx_info *vxi; struct sock *notify_sock; struct sk_buff *notify_cookie; @@ -125,6 +128,7 @@ if (S_ISREG(mode)) { struct mqueue_inode_info *info; struct task_struct *p = current; + struct vx_info *vxi = p->vx_info; unsigned long mq_bytes, mq_msg_tblsz; inode->i_fop = &mqueue_file_operations; @@ -139,6 +143,7 @@ info->notify_owner = NULL; info->qsize = 0; info->user = NULL; /* set when all is ok */ + info->vxi = NULL; memset(&info->attr, 0, sizeof(info->attr)); info->attr.mq_maxmsg = ipc_ns->mq_msg_max; info->attr.mq_msgsize = ipc_ns->mq_msgsize_max; @@ -153,22 +158,26 @@ spin_lock(&mq_lock); if (u->mq_bytes + mq_bytes < u->mq_bytes || u->mq_bytes + mq_bytes > - p->signal->rlim[RLIMIT_MSGQUEUE].rlim_cur) { + p->signal->rlim[RLIMIT_MSGQUEUE].rlim_cur || + !vx_ipcmsg_avail(vxi, mq_bytes)) { spin_unlock(&mq_lock); goto out_inode; } u->mq_bytes += mq_bytes; + vx_ipcmsg_add(vxi, u, mq_bytes); spin_unlock(&mq_lock); info->messages = kmalloc(mq_msg_tblsz, GFP_KERNEL); if (!info->messages) { spin_lock(&mq_lock); u->mq_bytes -= mq_bytes; + vx_ipcmsg_sub(vxi, u, mq_bytes); spin_unlock(&mq_lock); goto out_inode; } /* all is ok */ info->user = get_uid(u); + info->vxi = get_vx_info(vxi); } else if (S_ISDIR(mode)) { inc_nlink(inode); /* Some things misbehave if size == 0 on a directory */ @@ -269,8 +278,11 @@ (info->attr.mq_maxmsg * info->attr.mq_msgsize)); user = info->user; if (user) { + struct vx_info *vxi = info->vxi; + spin_lock(&mq_lock); user->mq_bytes -= mq_bytes; + vx_ipcmsg_sub(vxi, user, mq_bytes); /* * get_ns_from_inode() ensures that the * (ipc_ns = sb->s_fs_info) is either a valid ipc_ns @@ -280,6 +292,7 @@ if (ipc_ns) ipc_ns->mq_queues_count--; spin_unlock(&mq_lock); + put_vx_info(vxi); free_uid(user); } if (ipc_ns) diff -Nur kernel-2.6.32.54/linux-2.6.32/ipc/msg.c kernel-2.6.32.54.vs/linux-2.6.32/ipc/msg.c --- kernel-2.6.32.54/linux-2.6.32/ipc/msg.c 2012-01-16 15:01:39.884725533 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/ipc/msg.c 2012-01-16 14:51:21.997408421 +0100 @@ -38,6 +38,7 @@ #include #include #include +#include #include #include @@ -191,6 +192,7 @@ msq->q_perm.mode = msgflg & S_IRWXUGO; msq->q_perm.key = key; + msq->q_perm.xid = vx_current_xid(); msq->q_perm.security = NULL; retval = security_msg_queue_alloc(msq); diff -Nur kernel-2.6.32.54/linux-2.6.32/ipc/namespace.c kernel-2.6.32.54.vs/linux-2.6.32/ipc/namespace.c --- kernel-2.6.32.54/linux-2.6.32/ipc/namespace.c 2009-12-03 04:51:21.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/ipc/namespace.c 2012-01-16 14:51:21.997408421 +0100 @@ -11,6 +11,8 @@ #include #include #include +#include +#include #include "util.h" diff -Nur 
kernel-2.6.32.54/linux-2.6.32/ipc/sem.c kernel-2.6.32.54.vs/linux-2.6.32/ipc/sem.c --- kernel-2.6.32.54/linux-2.6.32/ipc/sem.c 2012-01-16 15:01:39.884725533 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/ipc/sem.c 2012-01-16 14:51:21.997408421 +0100 @@ -83,6 +83,8 @@ #include #include #include +#include +#include #include #include "util.h" @@ -256,6 +258,7 @@ sma->sem_perm.mode = (semflg & S_IRWXUGO); sma->sem_perm.key = key; + sma->sem_perm.xid = vx_current_xid(); sma->sem_perm.security = NULL; retval = security_sem_alloc(sma); @@ -271,6 +274,9 @@ return id; } ns->used_sems += nsems; + /* FIXME: obsoleted? */ + vx_semary_inc(sma); + vx_nsems_add(sma, nsems); sma->sem_base = (struct sem *) &sma[1]; INIT_LIST_HEAD(&sma->sem_pending); @@ -547,6 +553,9 @@ sem_unlock(sma); ns->used_sems -= sma->sem_nsems; + /* FIXME: obsoleted? */ + vx_nsems_sub(sma, sma->sem_nsems); + vx_semary_dec(sma); security_sem_free(sma); ipc_rcu_putref(sma); } diff -Nur kernel-2.6.32.54/linux-2.6.32/ipc/shm.c kernel-2.6.32.54.vs/linux-2.6.32/ipc/shm.c --- kernel-2.6.32.54/linux-2.6.32/ipc/shm.c 2012-01-16 15:01:39.884725533 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/ipc/shm.c 2012-01-16 14:51:22.001408407 +0100 @@ -40,6 +40,8 @@ #include #include #include +#include +#include #include @@ -170,7 +172,12 @@ */ static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp) { - ns->shm_tot -= (shp->shm_segsz + PAGE_SIZE - 1) >> PAGE_SHIFT; + struct vx_info *vxi = lookup_vx_info(shp->shm_perm.xid); + int numpages = (shp->shm_segsz + PAGE_SIZE - 1) >> PAGE_SHIFT; + + vx_ipcshm_sub(vxi, shp, numpages); + ns->shm_tot -= numpages; + shm_rmid(ns, shp); shm_unlock(shp); if (!is_file_hugepages(shp->shm_file)) @@ -180,6 +187,7 @@ shp->mlock_user); fput (shp->shm_file); security_shm_free(shp); + put_vx_info(vxi); ipc_rcu_putref(shp); } @@ -350,11 +358,15 @@ if (ns->shm_tot + numpages > ns->shm_ctlall) return -ENOSPC; + if (!vx_ipcshm_avail(current_vx_info(), numpages)) + return -ENOSPC; + shp = ipc_rcu_alloc(sizeof(*shp)); if (!shp) return -ENOMEM; shp->shm_perm.key = key; + shp->shm_perm.xid = vx_current_xid(); shp->shm_perm.mode = (shmflg & S_IRWXUGO); shp->mlock_user = NULL; @@ -408,6 +420,7 @@ ns->shm_tot += numpages; error = shp->shm_perm.id; shm_unlock(shp); + vx_ipcshm_add(current_vx_info(), key, numpages); return error; no_id: diff -Nur kernel-2.6.32.54/linux-2.6.32/kernel/capability.c kernel-2.6.32.54.vs/linux-2.6.32/kernel/capability.c --- kernel-2.6.32.54/linux-2.6.32/kernel/capability.c 2012-01-16 15:01:39.892725505 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/kernel/capability.c 2012-01-16 14:51:22.001408407 +0100 @@ -14,6 +14,7 @@ #include #include #include +#include #include /* @@ -121,6 +122,7 @@ return 0; } + /* * The only thing that can change the capabilities of the current * process is the current process. 
As such, we can't be in this code @@ -288,6 +290,8 @@ return ret; } +#include + /** * capable - Determine if the current task has a superior capability in effect * @cap: The capability to be tested for @@ -300,6 +304,9 @@ */ int capable(int cap) { + /* here for now so we don't require task locking */ + if (vs_check_bit(VXC_CAP_MASK, cap) && !vx_mcaps(1L << cap)) + return 0; if (unlikely(!cap_valid(cap))) { printk(KERN_CRIT "capable() called with invalid cap=%u\n", cap); BUG(); diff -Nur kernel-2.6.32.54/linux-2.6.32/kernel/compat.c kernel-2.6.32.54.vs/linux-2.6.32/kernel/compat.c --- kernel-2.6.32.54/linux-2.6.32/kernel/compat.c 2012-01-16 15:01:39.892725505 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/kernel/compat.c 2012-01-16 14:51:22.001408407 +0100 @@ -900,7 +900,7 @@ compat_time_t i; struct timeval tv; - do_gettimeofday(&tv); + vx_gettimeofday(&tv); i = tv.tv_sec; if (tloc) { @@ -925,7 +925,7 @@ if (err) return err; - do_settimeofday(&tv); + vx_settimeofday(&tv); return 0; } diff -Nur kernel-2.6.32.54/linux-2.6.32/kernel/exit.c kernel-2.6.32.54.vs/linux-2.6.32/kernel/exit.c --- kernel-2.6.32.54/linux-2.6.32/kernel/exit.c 2012-01-16 15:01:39.892725505 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/kernel/exit.c 2012-01-16 14:51:22.001408407 +0100 @@ -48,6 +48,10 @@ #include #include #include +#include +#include +#include +#include #include #include @@ -495,9 +499,11 @@ filp_close(file, files); cond_resched(); } + vx_openfd_dec(i); } i++; set >>= 1; + cond_resched(); } } } @@ -1027,11 +1033,16 @@ validate_creds_for_do_exit(tsk); + /* needs to stay after exit_notify() */ + exit_vx_info(tsk, code); + exit_nx_info(tsk); + preempt_disable(); exit_rcu(); /* causes final put_task_struct in finish_task_switch(). */ tsk->state = TASK_DEAD; schedule(); + printk("bad task: %p [%lx]\n", current, current->state); BUG(); /* Avoid "noreturn function does return". 
*/ for (;;) diff -Nur kernel-2.6.32.54/linux-2.6.32/kernel/fork.c kernel-2.6.32.54.vs/linux-2.6.32/kernel/fork.c --- kernel-2.6.32.54/linux-2.6.32/kernel/fork.c 2012-01-16 15:01:40.284724117 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/kernel/fork.c 2012-01-16 14:51:22.005408393 +0100 @@ -64,6 +64,10 @@ #include #include #include +#include +#include +#include +#include #include #include @@ -152,6 +156,8 @@ account_kernel_stack(tsk->stack, -1); free_thread_info(tsk->stack); rt_mutex_debug_task_free(tsk); + clr_vx_info(&tsk->vx_info); + clr_nx_info(&tsk->nx_info); ftrace_graph_exit_task(tsk); free_task_struct(tsk); } @@ -297,6 +303,8 @@ mm->free_area_cache = oldmm->mmap_base; mm->cached_hole_size = ~0UL; mm->map_count = 0; + __set_mm_counter(mm, file_rss, 0); + __set_mm_counter(mm, anon_rss, 0); cpumask_clear(mm_cpumask(mm)); mm->mm_rb = RB_ROOT; rb_link = &mm->mm_rb.rb_node; @@ -312,7 +320,7 @@ if (mpnt->vm_flags & VM_DONTCOPY) { long pages = vma_pages(mpnt); - mm->total_vm -= pages; + vx_vmpages_sub(mm, pages); vm_stat_account(mm, mpnt->vm_flags, mpnt->vm_file, -pages); continue; @@ -456,8 +464,8 @@ (current->mm->flags & MMF_INIT_MASK) : default_dump_filter; mm->core_state = NULL; mm->nr_ptes = 0; - set_mm_counter(mm, file_rss, 0); - set_mm_counter(mm, anon_rss, 0); + __set_mm_counter(mm, file_rss, 0); + __set_mm_counter(mm, anon_rss, 0); spin_lock_init(&mm->page_table_lock); mm->free_area_cache = TASK_UNMAPPED_BASE; mm->cached_hole_size = ~0UL; @@ -467,6 +475,7 @@ if (likely(!mm_alloc_pgd(mm))) { mm->def_flags = 0; mmu_notifier_mm_init(mm); + set_vx_info(&mm->mm_vx_info, p->vx_info); return mm; } @@ -500,6 +509,7 @@ mm_free_pgd(mm); destroy_context(mm); mmu_notifier_mm_destroy(mm); + clr_vx_info(&mm->mm_vx_info); free_mm(mm); } EXPORT_SYMBOL_GPL(__mmdrop); @@ -635,6 +645,7 @@ goto fail_nomem; memcpy(mm, oldmm, sizeof(*mm)); + mm->mm_vx_info = NULL; /* Initializing for Swap token stuff */ mm->token_priority = 0; @@ -673,6 +684,7 @@ * If init_new_context() failed, we cannot use mmput() to free the mm * because it calls destroy_context() */ + clr_vx_info(&mm->mm_vx_info); mm_free_pgd(mm); free_mm(mm); return NULL; @@ -987,6 +999,8 @@ int retval; struct task_struct *p; int cgroup_callbacks_done = 0; + struct vx_info *vxi; + struct nx_info *nxi; if ((clone_flags & (CLONE_NEWNS|CLONE_FS)) == (CLONE_NEWNS|CLONE_FS)) return ERR_PTR(-EINVAL); @@ -1033,12 +1047,28 @@ DEBUG_LOCKS_WARN_ON(!p->hardirqs_enabled); DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled); #endif + init_vx_info(&p->vx_info, current_vx_info()); + init_nx_info(&p->nx_info, current_nx_info()); + + /* check vserver memory */ + if (p->mm && !(clone_flags & CLONE_VM)) { + if (vx_vmpages_avail(p->mm, p->mm->total_vm)) + vx_pages_add(p->vx_info, RLIMIT_AS, p->mm->total_vm); + else + goto bad_fork_free; + } + if (p->mm && vx_flags(VXF_FORK_RSS, 0)) { + if (!vx_rss_avail(p->mm, get_mm_counter(p->mm, file_rss))) + goto bad_fork_cleanup_vm; + } retval = -EAGAIN; + if (!vx_nproc_avail(1)) + goto bad_fork_cleanup_vm; if (atomic_read(&p->real_cred->user->processes) >= p->signal->rlim[RLIMIT_NPROC].rlim_cur) { if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) && p->real_cred->user != INIT_USER) - goto bad_fork_free; + goto bad_fork_cleanup_vm; } retval = copy_creds(p, clone_flags); @@ -1290,6 +1320,18 @@ total_forks++; spin_unlock(¤t->sighand->siglock); + + /* p is copy of current */ + vxi = p->vx_info; + if (vxi) { + claim_vx_info(vxi, p); + atomic_inc(&vxi->cvirt.nr_threads); + atomic_inc(&vxi->cvirt.total_forks); + vx_nproc_inc(p); + } + nxi = 
p->nx_info; + if (nxi) + claim_nx_info(nxi, p); write_unlock_irq(&tasklist_lock); proc_fork_connector(p); cgroup_post_fork(p); @@ -1331,6 +1373,9 @@ bad_fork_cleanup_count: atomic_dec(&p->cred->user->processes); exit_creds(p); +bad_fork_cleanup_vm: + if (p->mm && !(clone_flags & CLONE_VM)) + vx_pages_sub(p->vx_info, RLIMIT_AS, p->mm->total_vm); bad_fork_free: free_task(p); fork_out: diff -Nur kernel-2.6.32.54/linux-2.6.32/kernel/fork.c.orig kernel-2.6.32.54.vs/linux-2.6.32/kernel/fork.c.orig --- kernel-2.6.32.54/linux-2.6.32/kernel/fork.c.orig 1970-01-01 01:00:00.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/kernel/fork.c.orig 2012-01-16 14:47:19.474254858 +0100 @@ -0,0 +1,1757 @@ +/* + * linux/kernel/fork.c + * + * Copyright (C) 1991, 1992 Linus Torvalds + */ + +/* + * 'fork.c' contains the help-routines for the 'fork' system call + * (see also entry.S and others). + * Fork is rather simple, once you get the hang of it, but the memory + * management can be a bitch. See 'mm/memory.c': 'copy_page_range()' + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include + +/* + * Protected counters by write_lock_irq(&tasklist_lock) + */ +unsigned long total_forks; /* Handle normal Linux uptimes. */ +int nr_threads; /* The idle threads do not count.. 
*/ + +int max_threads; /* tunable limit on nr_threads */ + +DEFINE_PER_CPU(unsigned long, process_counts) = 0; + +__cacheline_aligned DEFINE_RWLOCK(tasklist_lock); /* outer */ +EXPORT_SYMBOL_GPL(tasklist_lock); + +int nr_processes(void) +{ + int cpu; + int total = 0; + + for_each_possible_cpu(cpu) + total += per_cpu(process_counts, cpu); + + return total; +} + +#ifndef __HAVE_ARCH_TASK_STRUCT_ALLOCATOR +# define alloc_task_struct() kmem_cache_alloc(task_struct_cachep, GFP_KERNEL) +# define free_task_struct(tsk) kmem_cache_free(task_struct_cachep, (tsk)) +static struct kmem_cache *task_struct_cachep; +#endif + +#ifndef __HAVE_ARCH_THREAD_INFO_ALLOCATOR +static inline struct thread_info *alloc_thread_info(struct task_struct *tsk) +{ +#ifdef CONFIG_DEBUG_STACK_USAGE + gfp_t mask = GFP_KERNEL | __GFP_ZERO; +#else + gfp_t mask = GFP_KERNEL; +#endif + return (struct thread_info *)__get_free_pages(mask, THREAD_SIZE_ORDER); +} + +static inline void free_thread_info(struct thread_info *ti) +{ + free_pages((unsigned long)ti, THREAD_SIZE_ORDER); +} +#endif + +/* SLAB cache for signal_struct structures (tsk->signal) */ +static struct kmem_cache *signal_cachep; + +/* SLAB cache for sighand_struct structures (tsk->sighand) */ +struct kmem_cache *sighand_cachep; + +/* SLAB cache for files_struct structures (tsk->files) */ +struct kmem_cache *files_cachep; + +/* SLAB cache for fs_struct structures (tsk->fs) */ +struct kmem_cache *fs_cachep; + +/* SLAB cache for vm_area_struct structures */ +struct kmem_cache *vm_area_cachep; + +/* SLAB cache for mm_struct structures (tsk->mm) */ +static struct kmem_cache *mm_cachep; + +static void account_kernel_stack(struct thread_info *ti, int account) +{ + struct zone *zone = page_zone(virt_to_page(ti)); + + mod_zone_page_state(zone, NR_KERNEL_STACK, account); +} + +void free_task(struct task_struct *tsk) +{ + prop_local_destroy_single(&tsk->dirties); + account_kernel_stack(tsk->stack, -1); + free_thread_info(tsk->stack); + rt_mutex_debug_task_free(tsk); + ftrace_graph_exit_task(tsk); + free_task_struct(tsk); +} +EXPORT_SYMBOL(free_task); + +void __put_task_struct(struct task_struct *tsk) +{ + WARN_ON(!tsk->exit_state); + WARN_ON(atomic_read(&tsk->usage)); + WARN_ON(tsk == current); + + exit_creds(tsk); + delayacct_tsk_free(tsk); + + if (!profile_handoff_task(tsk)) + free_task(tsk); +} + +/* + * macro override instead of weak attribute alias, to workaround + * gcc 4.1.0 and 4.1.1 bugs with weak attribute and empty functions. + */ +#ifndef arch_task_cache_init +#define arch_task_cache_init() +#endif + +void __init fork_init(unsigned long mempages) +{ +#ifndef __HAVE_ARCH_TASK_STRUCT_ALLOCATOR +#ifndef ARCH_MIN_TASKALIGN +#define ARCH_MIN_TASKALIGN L1_CACHE_BYTES +#endif + /* create a slab on which task_structs can be allocated */ + task_struct_cachep = + kmem_cache_create("task_struct", sizeof(struct task_struct), + ARCH_MIN_TASKALIGN, SLAB_PANIC | SLAB_NOTRACK, NULL); +#endif + + /* do the arch specific task caches init */ + arch_task_cache_init(); + + /* + * The default maximum number of threads is set to a safe + * value: the thread structures can take up at most half + * of memory. 
+ */ + max_threads = mempages / (8 * THREAD_SIZE / PAGE_SIZE); + + /* + * we need to allow at least 20 threads to boot a system + */ + if(max_threads < 20) + max_threads = 20; + + init_task.signal->rlim[RLIMIT_NPROC].rlim_cur = max_threads/2; + init_task.signal->rlim[RLIMIT_NPROC].rlim_max = max_threads/2; + init_task.signal->rlim[RLIMIT_SIGPENDING] = + init_task.signal->rlim[RLIMIT_NPROC]; +} + +int __attribute__((weak)) arch_dup_task_struct(struct task_struct *dst, + struct task_struct *src) +{ + *dst = *src; + return 0; +} + +static struct task_struct *dup_task_struct(struct task_struct *orig) +{ + struct task_struct *tsk; + struct thread_info *ti; + unsigned long *stackend; + + int err; + + prepare_to_copy(orig); + + tsk = alloc_task_struct(); + if (!tsk) + return NULL; + + ti = alloc_thread_info(tsk); + if (!ti) { + free_task_struct(tsk); + return NULL; + } + + err = arch_dup_task_struct(tsk, orig); + if (err) + goto out; + + tsk->stack = ti; + + err = prop_local_init_single(&tsk->dirties); + if (err) + goto out; + + setup_thread_stack(tsk, orig); + stackend = end_of_stack(tsk); + *stackend = STACK_END_MAGIC; /* for overflow detection */ + +#ifdef CONFIG_CC_STACKPROTECTOR + tsk->stack_canary = get_random_int(); +#endif + + /* One for us, one for whoever does the "release_task()" (usually parent) */ + atomic_set(&tsk->usage,2); + atomic_set(&tsk->fs_excl, 0); +#ifdef CONFIG_BLK_DEV_IO_TRACE + tsk->btrace_seq = 0; +#endif + tsk->splice_pipe = NULL; + + account_kernel_stack(ti, 1); + + return tsk; + +out: + free_thread_info(ti); + free_task_struct(tsk); + return NULL; +} + +#ifdef CONFIG_MMU +static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm) +{ + struct vm_area_struct *mpnt, *tmp, *prev, **pprev; + struct rb_node **rb_link, *rb_parent; + int retval; + unsigned long charge; + struct mempolicy *pol; + + down_write(&oldmm->mmap_sem); + flush_cache_dup_mm(oldmm); + /* + * Not linked in yet - no deadlock potential: + */ + down_write_nested(&mm->mmap_sem, SINGLE_DEPTH_NESTING); + + mm->locked_vm = 0; + mm->mmap = NULL; + mm->mmap_cache = NULL; + mm->free_area_cache = oldmm->mmap_base; + mm->cached_hole_size = ~0UL; + mm->map_count = 0; + cpumask_clear(mm_cpumask(mm)); + mm->mm_rb = RB_ROOT; + rb_link = &mm->mm_rb.rb_node; + rb_parent = NULL; + pprev = &mm->mmap; + retval = ksm_fork(mm, oldmm); + if (retval) + goto out; + + prev = NULL; + for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) { + struct file *file; + + if (mpnt->vm_flags & VM_DONTCOPY) { + long pages = vma_pages(mpnt); + mm->total_vm -= pages; + vm_stat_account(mm, mpnt->vm_flags, mpnt->vm_file, + -pages); + continue; + } + charge = 0; + if (mpnt->vm_flags & VM_ACCOUNT) { + unsigned int len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT; + if (security_vm_enough_memory(len)) + goto fail_nomem; + charge = len; + } + tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL); + if (!tmp) + goto fail_nomem; + *tmp = *mpnt; + pol = mpol_dup(vma_policy(mpnt)); + retval = PTR_ERR(pol); + if (IS_ERR(pol)) + goto fail_nomem_policy; + vma_set_policy(tmp, pol); + tmp->vm_flags &= ~VM_LOCKED; + tmp->vm_mm = mm; + tmp->vm_next = tmp->vm_prev = NULL; + anon_vma_link(tmp); + file = tmp->vm_file; + if (file) { + struct inode *inode = file->f_path.dentry->d_inode; + struct address_space *mapping = file->f_mapping; + + get_file(file); + if (tmp->vm_flags & VM_DENYWRITE) + atomic_dec(&inode->i_writecount); + spin_lock(&mapping->i_mmap_lock); + if (tmp->vm_flags & VM_SHARED) + mapping->i_mmap_writable++; + tmp->vm_truncate_count = 
mpnt->vm_truncate_count; + flush_dcache_mmap_lock(mapping); + /* insert tmp into the share list, just after mpnt */ + vma_prio_tree_add(tmp, mpnt); + flush_dcache_mmap_unlock(mapping); + spin_unlock(&mapping->i_mmap_lock); + } + + /* + * Clear hugetlb-related page reserves for children. This only + * affects MAP_PRIVATE mappings. Faults generated by the child + * are not guaranteed to succeed, even if read-only + */ + if (is_vm_hugetlb_page(tmp)) + reset_vma_resv_huge_pages(tmp); + + /* + * Link in the new vma and copy the page table entries. + */ + *pprev = tmp; + pprev = &tmp->vm_next; + tmp->vm_prev = prev; + prev = tmp; + + __vma_link_rb(mm, tmp, rb_link, rb_parent); + rb_link = &tmp->vm_rb.rb_right; + rb_parent = &tmp->vm_rb; + + mm->map_count++; + retval = copy_page_range(mm, oldmm, mpnt); + + if (tmp->vm_ops && tmp->vm_ops->open) + tmp->vm_ops->open(tmp); + + if (retval) + goto out; + } + /* a new mm has just been created */ + arch_dup_mmap(oldmm, mm); + retval = 0; +out: + up_write(&mm->mmap_sem); + flush_tlb_mm(oldmm); + up_write(&oldmm->mmap_sem); + return retval; +fail_nomem_policy: + kmem_cache_free(vm_area_cachep, tmp); +fail_nomem: + retval = -ENOMEM; + vm_unacct_memory(charge); + goto out; +} + +static inline int mm_alloc_pgd(struct mm_struct * mm) +{ + mm->pgd = pgd_alloc(mm); + if (unlikely(!mm->pgd)) + return -ENOMEM; + return 0; +} + +static inline void mm_free_pgd(struct mm_struct * mm) +{ + pgd_free(mm, mm->pgd); +} +#else +#define dup_mmap(mm, oldmm) (0) +#define mm_alloc_pgd(mm) (0) +#define mm_free_pgd(mm) +#endif /* CONFIG_MMU */ + +__cacheline_aligned_in_smp DEFINE_SPINLOCK(mmlist_lock); + +#define allocate_mm() (kmem_cache_alloc(mm_cachep, GFP_KERNEL)) +#define free_mm(mm) (kmem_cache_free(mm_cachep, (mm))) + +static unsigned long default_dump_filter = MMF_DUMP_FILTER_DEFAULT; + +static int __init coredump_filter_setup(char *s) +{ + default_dump_filter = + (simple_strtoul(s, NULL, 0) << MMF_DUMP_FILTER_SHIFT) & + MMF_DUMP_FILTER_MASK; + return 1; +} + +__setup("coredump_filter=", coredump_filter_setup); + +#include + +static void mm_init_aio(struct mm_struct *mm) +{ +#ifdef CONFIG_AIO + spin_lock_init(&mm->ioctx_lock); + INIT_HLIST_HEAD(&mm->ioctx_list); +#endif +} + +static struct mm_struct * mm_init(struct mm_struct * mm, struct task_struct *p) +{ + atomic_set(&mm->mm_users, 1); + atomic_set(&mm->mm_count, 1); + init_rwsem(&mm->mmap_sem); + INIT_LIST_HEAD(&mm->mmlist); + mm->flags = (current->mm) ? + (current->mm->flags & MMF_INIT_MASK) : default_dump_filter; + mm->core_state = NULL; + mm->nr_ptes = 0; + set_mm_counter(mm, file_rss, 0); + set_mm_counter(mm, anon_rss, 0); + spin_lock_init(&mm->page_table_lock); + mm->free_area_cache = TASK_UNMAPPED_BASE; + mm->cached_hole_size = ~0UL; + mm_init_aio(mm); + mm_init_owner(mm, p); + + if (likely(!mm_alloc_pgd(mm))) { + mm->def_flags = 0; + mmu_notifier_mm_init(mm); + return mm; + } + + free_mm(mm); + return NULL; +} + +/* + * Allocate and initialize an mm_struct. + */ +struct mm_struct * mm_alloc(void) +{ + struct mm_struct * mm; + + mm = allocate_mm(); + if (mm) { + memset(mm, 0, sizeof(*mm)); + mm = mm_init(mm, current); + } + return mm; +} + +/* + * Called when the last reference to the mm + * is dropped: either by a lazy thread or by + * mmput. Free the page directory and the mm. 
+ */ +void __mmdrop(struct mm_struct *mm) +{ + BUG_ON(mm == &init_mm); + mm_free_pgd(mm); + destroy_context(mm); + mmu_notifier_mm_destroy(mm); + free_mm(mm); +} +EXPORT_SYMBOL_GPL(__mmdrop); + +/* + * Decrement the use count and release all resources for an mm. + */ +void mmput(struct mm_struct *mm) +{ + might_sleep(); + + if (atomic_dec_and_test(&mm->mm_users)) { + exit_aio(mm); + ksm_exit(mm); + exit_mmap(mm); + set_mm_exe_file(mm, NULL); + if (!list_empty(&mm->mmlist)) { + spin_lock(&mmlist_lock); + list_del(&mm->mmlist); + spin_unlock(&mmlist_lock); + } + put_swap_token(mm); + if (mm->binfmt) + module_put(mm->binfmt->module); + mmdrop(mm); + } +} +EXPORT_SYMBOL_GPL(mmput); + +/** + * get_task_mm - acquire a reference to the task's mm + * + * Returns %NULL if the task has no mm. Checks PF_KTHREAD (meaning + * this kernel workthread has transiently adopted a user mm with use_mm, + * to do its AIO) is not set and if so returns a reference to it, after + * bumping up the use count. User must release the mm via mmput() + * after use. Typically used by /proc and ptrace. + */ +struct mm_struct *get_task_mm(struct task_struct *task) +{ + struct mm_struct *mm; + + task_lock(task); + mm = task->mm; + if (mm) { + if (task->flags & PF_KTHREAD) + mm = NULL; + else + atomic_inc(&mm->mm_users); + } + task_unlock(task); + return mm; +} +EXPORT_SYMBOL_GPL(get_task_mm); + +/* Please note the differences between mmput and mm_release. + * mmput is called whenever we stop holding onto a mm_struct, + * error success whatever. + * + * mm_release is called after a mm_struct has been removed + * from the current process. + * + * This difference is important for error handling, when we + * only half set up a mm_struct for a new process and need to restore + * the old one. Because we mmput the new mm_struct before + * restoring the old one. . . + * Eric Biederman 10 January 1998 + */ +void mm_release(struct task_struct *tsk, struct mm_struct *mm) +{ + struct completion *vfork_done = tsk->vfork_done; + + /* Get rid of any futexes when releasing the mm */ +#ifdef CONFIG_FUTEX + if (unlikely(tsk->robust_list)) { + exit_robust_list(tsk); + tsk->robust_list = NULL; + } +#ifdef CONFIG_COMPAT + if (unlikely(tsk->compat_robust_list)) { + compat_exit_robust_list(tsk); + tsk->compat_robust_list = NULL; + } +#endif + if (unlikely(!list_empty(&tsk->pi_state_list))) + exit_pi_state_list(tsk); +#endif + + /* Get rid of any cached register state */ + deactivate_mm(tsk, mm); + + /* notify parent sleeping on vfork() */ + if (vfork_done) { + tsk->vfork_done = NULL; + complete(vfork_done); + } + + /* + * If we're exiting normally, clear a user-space tid field if + * requested. We leave this alone when dying by signal, to leave + * the value intact in a core dump, and to save the unnecessary + * trouble otherwise. Userland only wants this done for a sys_exit. + */ + if (tsk->clear_child_tid) { + if (!(tsk->flags & PF_SIGNALED) && + atomic_read(&mm->mm_users) > 1) { + /* + * We don't check the error code - if userspace has + * not set up a proper pointer then tough luck. + */ + put_user(0, tsk->clear_child_tid); + sys_futex(tsk->clear_child_tid, FUTEX_WAKE, + 1, NULL, NULL, 0); + } + tsk->clear_child_tid = NULL; + } +} + +/* + * Allocate a new mm structure and copy contents from the + * mm structure of the passed in task structure. 
+ */ +struct mm_struct *dup_mm(struct task_struct *tsk) +{ + struct mm_struct *mm, *oldmm = current->mm; + int err; + + if (!oldmm) + return NULL; + + mm = allocate_mm(); + if (!mm) + goto fail_nomem; + + memcpy(mm, oldmm, sizeof(*mm)); + + /* Initializing for Swap token stuff */ + mm->token_priority = 0; + mm->last_interval = 0; + + if (!mm_init(mm, tsk)) + goto fail_nomem; + + if (init_new_context(tsk, mm)) + goto fail_nocontext; + + dup_mm_exe_file(oldmm, mm); + + err = dup_mmap(mm, oldmm); + if (err) + goto free_pt; + + mm->hiwater_rss = get_mm_rss(mm); + mm->hiwater_vm = mm->total_vm; + + if (mm->binfmt && !try_module_get(mm->binfmt->module)) + goto free_pt; + + return mm; + +free_pt: + /* don't put binfmt in mmput, we haven't got module yet */ + mm->binfmt = NULL; + mmput(mm); + +fail_nomem: + return NULL; + +fail_nocontext: + /* + * If init_new_context() failed, we cannot use mmput() to free the mm + * because it calls destroy_context() + */ + mm_free_pgd(mm); + free_mm(mm); + return NULL; +} + +static int copy_mm(unsigned long clone_flags, struct task_struct * tsk) +{ + struct mm_struct * mm, *oldmm; + int retval; + + tsk->min_flt = tsk->maj_flt = 0; + tsk->nvcsw = tsk->nivcsw = 0; +#ifdef CONFIG_DETECT_HUNG_TASK + tsk->last_switch_count = tsk->nvcsw + tsk->nivcsw; +#endif + + tsk->mm = NULL; + tsk->active_mm = NULL; + + /* + * Are we cloning a kernel thread? + * + * We need to steal a active VM for that.. + */ + oldmm = current->mm; + if (!oldmm) + return 0; + + if (clone_flags & CLONE_VM) { + atomic_inc(&oldmm->mm_users); + mm = oldmm; + goto good_mm; + } + + retval = -ENOMEM; + mm = dup_mm(tsk); + if (!mm) + goto fail_nomem; + +good_mm: + /* Initializing for Swap token stuff */ + mm->token_priority = 0; + mm->last_interval = 0; + + tsk->mm = mm; + tsk->active_mm = mm; + return 0; + +fail_nomem: + return retval; +} + +static int copy_fs(unsigned long clone_flags, struct task_struct *tsk) +{ + struct fs_struct *fs = current->fs; + if (clone_flags & CLONE_FS) { + /* tsk->fs is already what we want */ + write_lock(&fs->lock); + if (fs->in_exec) { + write_unlock(&fs->lock); + return -EAGAIN; + } + fs->users++; + write_unlock(&fs->lock); + return 0; + } + tsk->fs = copy_fs_struct(fs); + if (!tsk->fs) + return -ENOMEM; + return 0; +} + +static int copy_files(unsigned long clone_flags, struct task_struct * tsk) +{ + struct files_struct *oldf, *newf; + int error = 0; + + /* + * A background process may not have any files ... 
+ */ + oldf = current->files; + if (!oldf) + goto out; + + if (clone_flags & CLONE_FILES) { + atomic_inc(&oldf->count); + goto out; + } + + newf = dup_fd(oldf, &error); + if (!newf) + goto out; + + tsk->files = newf; + error = 0; +out: + return error; +} + +static int copy_io(unsigned long clone_flags, struct task_struct *tsk) +{ +#ifdef CONFIG_BLOCK + struct io_context *ioc = current->io_context; + + if (!ioc) + return 0; + /* + * Share io context with parent, if CLONE_IO is set + */ + if (clone_flags & CLONE_IO) { + tsk->io_context = ioc_task_link(ioc); + if (unlikely(!tsk->io_context)) + return -ENOMEM; + } else if (ioprio_valid(ioc->ioprio)) { + tsk->io_context = alloc_io_context(GFP_KERNEL, -1); + if (unlikely(!tsk->io_context)) + return -ENOMEM; + + tsk->io_context->ioprio = ioc->ioprio; + } +#endif + return 0; +} + +static int copy_sighand(unsigned long clone_flags, struct task_struct *tsk) +{ + struct sighand_struct *sig; + + if (clone_flags & CLONE_SIGHAND) { + atomic_inc(¤t->sighand->count); + return 0; + } + sig = kmem_cache_alloc(sighand_cachep, GFP_KERNEL); + rcu_assign_pointer(tsk->sighand, sig); + if (!sig) + return -ENOMEM; + atomic_set(&sig->count, 1); + memcpy(sig->action, current->sighand->action, sizeof(sig->action)); + return 0; +} + +void __cleanup_sighand(struct sighand_struct *sighand) +{ + if (atomic_dec_and_test(&sighand->count)) + kmem_cache_free(sighand_cachep, sighand); +} + + +/* + * Initialize POSIX timer handling for a thread group. + */ +static void posix_cpu_timers_init_group(struct signal_struct *sig) +{ + /* Thread group counters. */ + thread_group_cputime_init(sig); + + /* Expiration times and increments. */ + sig->it[CPUCLOCK_PROF].expires = cputime_zero; + sig->it[CPUCLOCK_PROF].incr = cputime_zero; + sig->it[CPUCLOCK_VIRT].expires = cputime_zero; + sig->it[CPUCLOCK_VIRT].incr = cputime_zero; + + /* Cached expiration times. */ + sig->cputime_expires.prof_exp = cputime_zero; + sig->cputime_expires.virt_exp = cputime_zero; + sig->cputime_expires.sched_exp = 0; + + if (sig->rlim[RLIMIT_CPU].rlim_cur != RLIM_INFINITY) { + sig->cputime_expires.prof_exp = + secs_to_cputime(sig->rlim[RLIMIT_CPU].rlim_cur); + sig->cputimer.running = 1; + } + + /* The timer lists. 
*/ + INIT_LIST_HEAD(&sig->cpu_timers[0]); + INIT_LIST_HEAD(&sig->cpu_timers[1]); + INIT_LIST_HEAD(&sig->cpu_timers[2]); +} + +static int copy_signal(unsigned long clone_flags, struct task_struct *tsk) +{ + struct signal_struct *sig; + + if (clone_flags & CLONE_THREAD) + return 0; + + sig = kmem_cache_alloc(signal_cachep, GFP_KERNEL); + tsk->signal = sig; + if (!sig) + return -ENOMEM; + + atomic_set(&sig->count, 1); + atomic_set(&sig->live, 1); + init_waitqueue_head(&sig->wait_chldexit); + sig->flags = 0; + if (clone_flags & CLONE_NEWPID) + sig->flags |= SIGNAL_UNKILLABLE; + sig->group_exit_code = 0; + sig->group_exit_task = NULL; + sig->group_stop_count = 0; + sig->curr_target = tsk; + init_sigpending(&sig->shared_pending); + INIT_LIST_HEAD(&sig->posix_timers); + + hrtimer_init(&sig->real_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + sig->it_real_incr.tv64 = 0; + sig->real_timer.function = it_real_fn; + + sig->leader = 0; /* session leadership doesn't inherit */ + sig->tty_old_pgrp = NULL; + sig->tty = NULL; + + sig->utime = sig->stime = sig->cutime = sig->cstime = cputime_zero; + sig->gtime = cputime_zero; + sig->cgtime = cputime_zero; +#ifndef CONFIG_VIRT_CPU_ACCOUNTING + sig->prev_utime = sig->prev_stime = cputime_zero; +#endif + sig->nvcsw = sig->nivcsw = sig->cnvcsw = sig->cnivcsw = 0; + sig->min_flt = sig->maj_flt = sig->cmin_flt = sig->cmaj_flt = 0; + sig->inblock = sig->oublock = sig->cinblock = sig->coublock = 0; + sig->maxrss = sig->cmaxrss = 0; + task_io_accounting_init(&sig->ioac); + sig->sum_sched_runtime = 0; + taskstats_tgid_init(sig); + + task_lock(current->group_leader); + memcpy(sig->rlim, current->signal->rlim, sizeof sig->rlim); + task_unlock(current->group_leader); + + posix_cpu_timers_init_group(sig); + + acct_init_pacct(&sig->pacct); + + tty_audit_fork(sig); + + sig->oom_adj = current->signal->oom_adj; + + return 0; +} + +void __cleanup_signal(struct signal_struct *sig) +{ + thread_group_cputime_free(sig); + tty_kref_put(sig->tty); + kmem_cache_free(signal_cachep, sig); +} + +static void copy_flags(unsigned long clone_flags, struct task_struct *p) +{ + unsigned long new_flags = p->flags; + + new_flags &= ~PF_SUPERPRIV; + new_flags |= PF_FORKNOEXEC; + new_flags |= PF_STARTING; + p->flags = new_flags; + clear_freeze_flag(p); +} + +SYSCALL_DEFINE1(set_tid_address, int __user *, tidptr) +{ + current->clear_child_tid = tidptr; + + return task_pid_vnr(current); +} + +static void rt_mutex_init_task(struct task_struct *p) +{ + spin_lock_init(&p->pi_lock); +#ifdef CONFIG_RT_MUTEXES + plist_head_init(&p->pi_waiters, &p->pi_lock); + p->pi_blocked_on = NULL; +#endif +} + +#ifdef CONFIG_MM_OWNER +void mm_init_owner(struct mm_struct *mm, struct task_struct *p) +{ + mm->owner = p; +} +#endif /* CONFIG_MM_OWNER */ + +/* + * Initialize POSIX timer handling for a single task. + */ +static void posix_cpu_timers_init(struct task_struct *tsk) +{ + tsk->cputime_expires.prof_exp = cputime_zero; + tsk->cputime_expires.virt_exp = cputime_zero; + tsk->cputime_expires.sched_exp = 0; + INIT_LIST_HEAD(&tsk->cpu_timers[0]); + INIT_LIST_HEAD(&tsk->cpu_timers[1]); + INIT_LIST_HEAD(&tsk->cpu_timers[2]); +} + +/* + * This creates a new process as a copy of the old one, + * but does not actually start it yet. + * + * It copies the registers, and all the appropriate + * parts of the process environment (as per the clone + * flags). The actual kick-off is left to the caller. 
+ */ +static struct task_struct *copy_process(unsigned long clone_flags, + unsigned long stack_start, + struct pt_regs *regs, + unsigned long stack_size, + int __user *child_tidptr, + struct pid *pid, + int trace) +{ + int retval; + struct task_struct *p; + int cgroup_callbacks_done = 0; + + if ((clone_flags & (CLONE_NEWNS|CLONE_FS)) == (CLONE_NEWNS|CLONE_FS)) + return ERR_PTR(-EINVAL); + + /* + * Thread groups must share signals as well, and detached threads + * can only be started up within the thread group. + */ + if ((clone_flags & CLONE_THREAD) && !(clone_flags & CLONE_SIGHAND)) + return ERR_PTR(-EINVAL); + + /* + * Shared signal handlers imply shared VM. By way of the above, + * thread groups also imply shared VM. Blocking this case allows + * for various simplifications in other code. + */ + if ((clone_flags & CLONE_SIGHAND) && !(clone_flags & CLONE_VM)) + return ERR_PTR(-EINVAL); + + /* + * Siblings of global init remain as zombies on exit since they are + * not reaped by their parent (swapper). To solve this and to avoid + * multi-rooted process trees, prevent global and container-inits + * from creating siblings. + */ + if ((clone_flags & CLONE_PARENT) && + current->signal->flags & SIGNAL_UNKILLABLE) + return ERR_PTR(-EINVAL); + + retval = security_task_create(clone_flags); + if (retval) + goto fork_out; + + retval = -ENOMEM; + p = dup_task_struct(current); + if (!p) + goto fork_out; + + ftrace_graph_init_task(p); + + rt_mutex_init_task(p); + +#ifdef CONFIG_PROVE_LOCKING + DEBUG_LOCKS_WARN_ON(!p->hardirqs_enabled); + DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled); +#endif + retval = -EAGAIN; + if (atomic_read(&p->real_cred->user->processes) >= + p->signal->rlim[RLIMIT_NPROC].rlim_cur) { + if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) && + p->real_cred->user != INIT_USER) + goto bad_fork_free; + } + + retval = copy_creds(p, clone_flags); + if (retval < 0) + goto bad_fork_free; + + /* + * If multiple threads are within copy_process(), then this check + * triggers too late. This doesn't hurt, the check is only there + * to stop root fork bombs. 
+ */ + retval = -EAGAIN; + if (nr_threads >= max_threads) + goto bad_fork_cleanup_count; + + if (!try_module_get(task_thread_info(p)->exec_domain->module)) + goto bad_fork_cleanup_count; + + p->did_exec = 0; + delayacct_tsk_init(p); /* Must remain after dup_task_struct() */ + copy_flags(clone_flags, p); + INIT_LIST_HEAD(&p->children); + INIT_LIST_HEAD(&p->sibling); + rcu_copy_process(p); + p->vfork_done = NULL; + spin_lock_init(&p->alloc_lock); + + init_sigpending(&p->pending); + + p->utime = cputime_zero; + p->stime = cputime_zero; + p->gtime = cputime_zero; + p->utimescaled = cputime_zero; + p->stimescaled = cputime_zero; + p->prev_utime = cputime_zero; + p->prev_stime = cputime_zero; + + p->default_timer_slack_ns = current->timer_slack_ns; + + task_io_accounting_init(&p->ioac); + acct_clear_integrals(p); + + posix_cpu_timers_init(p); + + p->lock_depth = -1; /* -1 = no lock */ + do_posix_clock_monotonic_gettime(&p->start_time); + p->real_start_time = p->start_time; + monotonic_to_bootbased(&p->real_start_time); + p->io_context = NULL; + p->audit_context = NULL; + cgroup_fork(p); +#ifdef CONFIG_NUMA + p->mempolicy = mpol_dup(p->mempolicy); + if (IS_ERR(p->mempolicy)) { + retval = PTR_ERR(p->mempolicy); + p->mempolicy = NULL; + goto bad_fork_cleanup_cgroup; + } + mpol_fix_fork_child_flag(p); +#endif +#ifdef CONFIG_TRACE_IRQFLAGS + p->irq_events = 0; +#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW + p->hardirqs_enabled = 1; +#else + p->hardirqs_enabled = 0; +#endif + p->hardirq_enable_ip = 0; + p->hardirq_enable_event = 0; + p->hardirq_disable_ip = _THIS_IP_; + p->hardirq_disable_event = 0; + p->softirqs_enabled = 1; + p->softirq_enable_ip = _THIS_IP_; + p->softirq_enable_event = 0; + p->softirq_disable_ip = 0; + p->softirq_disable_event = 0; + p->hardirq_context = 0; + p->softirq_context = 0; +#endif +#ifdef CONFIG_LOCKDEP + p->lockdep_depth = 0; /* no locks held yet */ + p->curr_chain_key = 0; + p->lockdep_recursion = 0; +#endif + +#ifdef CONFIG_DEBUG_MUTEXES + p->blocked_on = NULL; /* not blocked yet */ +#endif + + p->bts = NULL; + + /* Perform scheduler related setup. Assign this task to a CPU. 
*/ + sched_fork(p, clone_flags); + + retval = perf_event_init_task(p); + if (retval) + goto bad_fork_cleanup_policy; + + if ((retval = audit_alloc(p))) + goto bad_fork_cleanup_policy; + /* copy all the process information */ + if ((retval = copy_semundo(clone_flags, p))) + goto bad_fork_cleanup_audit; + if ((retval = copy_files(clone_flags, p))) + goto bad_fork_cleanup_semundo; + if ((retval = copy_fs(clone_flags, p))) + goto bad_fork_cleanup_files; + if ((retval = copy_sighand(clone_flags, p))) + goto bad_fork_cleanup_fs; + if ((retval = copy_signal(clone_flags, p))) + goto bad_fork_cleanup_sighand; + if ((retval = copy_mm(clone_flags, p))) + goto bad_fork_cleanup_signal; + if ((retval = copy_namespaces(clone_flags, p))) + goto bad_fork_cleanup_mm; + if ((retval = copy_io(clone_flags, p))) + goto bad_fork_cleanup_namespaces; + retval = copy_thread(clone_flags, stack_start, stack_size, p, regs); + if (retval) + goto bad_fork_cleanup_io; + + if (pid != &init_struct_pid) { + retval = -ENOMEM; + pid = alloc_pid(p->nsproxy->pid_ns); + if (!pid) + goto bad_fork_cleanup_io; + + if (clone_flags & CLONE_NEWPID) { + retval = pid_ns_prepare_proc(p->nsproxy->pid_ns); + if (retval < 0) + goto bad_fork_free_pid; + } + } + + p->pid = pid_nr(pid); + p->tgid = p->pid; + if (clone_flags & CLONE_THREAD) + p->tgid = current->tgid; + + if (current->nsproxy != p->nsproxy) { + retval = ns_cgroup_clone(p, pid); + if (retval) + goto bad_fork_free_pid; + } + + p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL; + /* + * Clear TID on mm_release()? + */ + p->clear_child_tid = (clone_flags & CLONE_CHILD_CLEARTID) ? child_tidptr: NULL; +#ifdef CONFIG_FUTEX + p->robust_list = NULL; +#ifdef CONFIG_COMPAT + p->compat_robust_list = NULL; +#endif + INIT_LIST_HEAD(&p->pi_state_list); + p->pi_state_cache = NULL; +#endif + /* + * sigaltstack should be cleared when sharing the same VM + */ + if ((clone_flags & (CLONE_VM|CLONE_VFORK)) == CLONE_VM) + p->sas_ss_sp = p->sas_ss_size = 0; + + /* + * Syscall tracing should be turned off in the child regardless + * of CLONE_PTRACE. + */ + clear_tsk_thread_flag(p, TIF_SYSCALL_TRACE); +#ifdef TIF_SYSCALL_EMU + clear_tsk_thread_flag(p, TIF_SYSCALL_EMU); +#endif + clear_all_latency_tracing(p); + + /* ok, now we should be set up.. */ + p->exit_signal = (clone_flags & CLONE_THREAD) ? -1 : (clone_flags & CSIGNAL); + p->pdeath_signal = 0; + p->exit_state = 0; + + /* + * Ok, make it visible to the rest of the system. + * We dont wake it up yet. + */ + p->group_leader = p; + INIT_LIST_HEAD(&p->thread_group); + + /* Now that the task is set up, run cgroup callbacks if + * necessary. We need to run them before the task is visible + * on the tasklist. */ + cgroup_fork_callbacks(p); + cgroup_callbacks_done = 1; + + /* Need tasklist lock for parent etc handling! */ + write_lock_irq(&tasklist_lock); + + /* CLONE_PARENT re-uses the old parent */ + if (clone_flags & (CLONE_PARENT|CLONE_THREAD)) { + p->real_parent = current->real_parent; + p->parent_exec_id = current->parent_exec_id; + } else { + p->real_parent = current; + p->parent_exec_id = current->self_exec_id; + } + + spin_lock(¤t->sighand->siglock); + + /* + * Process group and session signals need to be delivered to just the + * parent before the fork or both the parent and the child after the + * fork. Restart if a signal comes in before we add the new process to + * it's process group. + * A fatal signal pending means that current will exit, so the new + * thread can't slip out of an OOM kill (or normal SIGKILL). 
+ */
+ recalc_sigpending();
+ if (signal_pending(current)) {
+ spin_unlock(&current->sighand->siglock);
+ write_unlock_irq(&tasklist_lock);
+ retval = -ERESTARTNOINTR;
+ goto bad_fork_free_pid;
+ }
+
+ if (clone_flags & CLONE_THREAD) {
+ atomic_inc(&current->signal->count);
+ atomic_inc(&current->signal->live);
+ p->group_leader = current->group_leader;
+ list_add_tail_rcu(&p->thread_group, &p->group_leader->thread_group);
+ }
+
+ if (likely(p->pid)) {
+ list_add_tail(&p->sibling, &p->real_parent->children);
+ tracehook_finish_clone(p, clone_flags, trace);
+
+ if (thread_group_leader(p)) {
+ if (clone_flags & CLONE_NEWPID)
+ p->nsproxy->pid_ns->child_reaper = p;
+
+ p->signal->leader_pid = pid;
+ tty_kref_put(p->signal->tty);
+ p->signal->tty = tty_kref_get(current->signal->tty);
+ attach_pid(p, PIDTYPE_PGID, task_pgrp(current));
+ attach_pid(p, PIDTYPE_SID, task_session(current));
+ list_add_tail_rcu(&p->tasks, &init_task.tasks);
+ __get_cpu_var(process_counts)++;
+ }
+ attach_pid(p, PIDTYPE_PID, pid);
+ nr_threads++;
+ }
+
+ total_forks++;
+ spin_unlock(&current->sighand->siglock);
+ write_unlock_irq(&tasklist_lock);
+ proc_fork_connector(p);
+ cgroup_post_fork(p);
+ perf_event_fork(p);
+ return p;
+
+bad_fork_free_pid:
+ if (pid != &init_struct_pid)
+ free_pid(pid);
+bad_fork_cleanup_io:
+ put_io_context(p->io_context);
+bad_fork_cleanup_namespaces:
+ exit_task_namespaces(p);
+bad_fork_cleanup_mm:
+ if (p->mm)
+ mmput(p->mm);
+bad_fork_cleanup_signal:
+ if (!(clone_flags & CLONE_THREAD))
+ __cleanup_signal(p->signal);
+bad_fork_cleanup_sighand:
+ __cleanup_sighand(p->sighand);
+bad_fork_cleanup_fs:
+ exit_fs(p); /* blocking */
+bad_fork_cleanup_files:
+ exit_files(p); /* blocking */
+bad_fork_cleanup_semundo:
+ exit_sem(p);
+bad_fork_cleanup_audit:
+ audit_free(p);
+bad_fork_cleanup_policy:
+ perf_event_free_task(p);
+#ifdef CONFIG_NUMA
+ mpol_put(p->mempolicy);
+bad_fork_cleanup_cgroup:
+#endif
+ cgroup_exit(p, cgroup_callbacks_done);
+ delayacct_tsk_free(p);
+ module_put(task_thread_info(p)->exec_domain->module);
+bad_fork_cleanup_count:
+ atomic_dec(&p->cred->user->processes);
+ exit_creds(p);
+bad_fork_free:
+ free_task(p);
+fork_out:
+ return ERR_PTR(retval);
+}
+
+noinline struct pt_regs * __cpuinit __attribute__((weak)) idle_regs(struct pt_regs *regs)
+{
+ memset(regs, 0, sizeof(struct pt_regs));
+ return regs;
+}
+
+struct task_struct * __cpuinit fork_idle(int cpu)
+{
+ struct task_struct *task;
+ struct pt_regs regs;
+
+ task = copy_process(CLONE_VM, 0, idle_regs(&regs), 0, NULL,
+ &init_struct_pid, 0);
+ if (!IS_ERR(task))
+ init_idle(task, cpu);
+
+ return task;
+}
+
+/*
+ * Ok, this is the main fork-routine.
+ *
+ * It copies the process, and if successful kick-starts
+ * it and waits for it to finish using the VM if required.
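+ *
+ * As a minimal illustration (user-space sketch, not kernel code): the
+ * vfork() wrapper enters here with CLONE_VFORK|CLONE_VM|SIGCHLD, so the
+ * parent sleeps on the vfork completion until the child execs or exits:
+ *
+ *	pid_t pid = vfork();
+ *	if (pid == 0) {
+ *		execl("/bin/true", "true", (char *)NULL);
+ *		_exit(127);
+ *	}
+ *	(the parent resumes only after the child's exec or exit)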
+ */ +long do_fork(unsigned long clone_flags, + unsigned long stack_start, + struct pt_regs *regs, + unsigned long stack_size, + int __user *parent_tidptr, + int __user *child_tidptr) +{ + struct task_struct *p; + int trace = 0; + long nr; + + /* + * Do some preliminary argument and permissions checking before we + * actually start allocating stuff + */ + if (clone_flags & CLONE_NEWUSER) { + if (clone_flags & CLONE_THREAD) + return -EINVAL; + /* hopefully this check will go away when userns support is + * complete + */ + if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SETUID) || + !capable(CAP_SETGID)) + return -EPERM; + } + + /* + * We hope to recycle these flags after 2.6.26 + */ + if (unlikely(clone_flags & CLONE_STOPPED)) { + static int __read_mostly count = 100; + + if (count > 0 && printk_ratelimit()) { + char comm[TASK_COMM_LEN]; + + count--; + printk(KERN_INFO "fork(): process `%s' used deprecated " + "clone flags 0x%lx\n", + get_task_comm(comm, current), + clone_flags & CLONE_STOPPED); + } + } + + /* + * When called from kernel_thread, don't do user tracing stuff. + */ + if (likely(user_mode(regs))) + trace = tracehook_prepare_clone(clone_flags); + + p = copy_process(clone_flags, stack_start, regs, stack_size, + child_tidptr, NULL, trace); + /* + * Do this prior waking up the new thread - the thread pointer + * might get invalid after that point, if the thread exits quickly. + */ + if (!IS_ERR(p)) { + struct completion vfork; + + trace_sched_process_fork(current, p); + + nr = task_pid_vnr(p); + + if (clone_flags & CLONE_PARENT_SETTID) + put_user(nr, parent_tidptr); + + if (clone_flags & CLONE_VFORK) { + p->vfork_done = &vfork; + init_completion(&vfork); + } + + audit_finish_fork(p); + tracehook_report_clone(regs, clone_flags, nr, p); + + /* + * We set PF_STARTING at creation in case tracing wants to + * use this to distinguish a fully live task from one that + * hasn't gotten to tracehook_report_clone() yet. Now we + * clear it and set the child going. + */ + p->flags &= ~PF_STARTING; + + if (unlikely(clone_flags & CLONE_STOPPED)) { + /* + * We'll start up with an immediate SIGSTOP. 
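+ * The child is therefore created in TASK_STOPPED with SIGSTOP already
+ * pending and is not woken here; a caller still using this deprecated
+ * flag has to resume the child itself, e.g. with kill(pid, SIGCONT).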
+ */
+ sigaddset(&p->pending.signal, SIGSTOP);
+ set_tsk_thread_flag(p, TIF_SIGPENDING);
+ __set_task_state(p, TASK_STOPPED);
+ } else {
+ wake_up_new_task(p, clone_flags);
+ }
+
+ tracehook_report_clone_complete(trace, regs,
+ clone_flags, nr, p);
+
+ if (clone_flags & CLONE_VFORK) {
+ freezer_do_not_count();
+ wait_for_completion(&vfork);
+ freezer_count();
+ tracehook_report_vfork_done(p, nr);
+ }
+ } else {
+ nr = PTR_ERR(p);
+ }
+ return nr;
+}
+
+#ifndef ARCH_MIN_MMSTRUCT_ALIGN
+#define ARCH_MIN_MMSTRUCT_ALIGN 0
+#endif
+
+static void sighand_ctor(void *data)
+{
+ struct sighand_struct *sighand = data;
+
+ spin_lock_init(&sighand->siglock);
+ init_waitqueue_head(&sighand->signalfd_wqh);
+}
+
+void __init proc_caches_init(void)
+{
+ sighand_cachep = kmem_cache_create("sighand_cache",
+ sizeof(struct sighand_struct), 0,
+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_DESTROY_BY_RCU|
+ SLAB_NOTRACK, sighand_ctor);
+ signal_cachep = kmem_cache_create("signal_cache",
+ sizeof(struct signal_struct), 0,
+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL);
+ files_cachep = kmem_cache_create("files_cache",
+ sizeof(struct files_struct), 0,
+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL);
+ fs_cachep = kmem_cache_create("fs_cache",
+ sizeof(struct fs_struct), 0,
+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL);
+ mm_cachep = kmem_cache_create("mm_struct",
+ sizeof(struct mm_struct), ARCH_MIN_MMSTRUCT_ALIGN,
+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL);
+ vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC);
+ mmap_init();
+}
+
+/*
+ * Check constraints on flags passed to the unshare system call and
+ * force unsharing of additional process context as appropriate.
+ */
+static void check_unshare_flags(unsigned long *flags_ptr)
+{
+ /*
+ * If unsharing a thread from a thread group, must also
+ * unshare vm.
+ */
+ if (*flags_ptr & CLONE_THREAD)
+ *flags_ptr |= CLONE_VM;
+
+ /*
+ * If unsharing vm, must also unshare signal handlers.
+ */
+ if (*flags_ptr & CLONE_VM)
+ *flags_ptr |= CLONE_SIGHAND;
+
+ /*
+ * If unsharing signal handlers and the task was created
+ * using CLONE_THREAD, then must unshare the thread
+ */
+ if ((*flags_ptr & CLONE_SIGHAND) &&
+ (atomic_read(&current->signal->count) > 1))
+ *flags_ptr |= CLONE_THREAD;
+
+ /*
+ * If unsharing namespace, must also unshare filesystem information.
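+ *
+ * Taken together, the rules above form an implication chain; for
+ * example a plain unshare(CLONE_THREAD) request is promoted step by
+ * step:
+ *
+ *	CLONE_THREAD -> CLONE_VM -> CLONE_SIGHAND
+ *
+ * and unshare(CLONE_NEWNS) becomes CLONE_NEWNS|CLONE_FS before the
+ * individual unshare_*() helpers below are consulted.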
+ */ + if (*flags_ptr & CLONE_NEWNS) + *flags_ptr |= CLONE_FS; +} + +/* + * Unsharing of tasks created with CLONE_THREAD is not supported yet + */ +static int unshare_thread(unsigned long unshare_flags) +{ + if (unshare_flags & CLONE_THREAD) + return -EINVAL; + + return 0; +} + +/* + * Unshare the filesystem structure if it is being shared + */ +static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp) +{ + struct fs_struct *fs = current->fs; + + if (!(unshare_flags & CLONE_FS) || !fs) + return 0; + + /* don't need lock here; in the worst case we'll do useless copy */ + if (fs->users == 1) + return 0; + + *new_fsp = copy_fs_struct(fs); + if (!*new_fsp) + return -ENOMEM; + + return 0; +} + +/* + * Unsharing of sighand is not supported yet + */ +static int unshare_sighand(unsigned long unshare_flags, struct sighand_struct **new_sighp) +{ + struct sighand_struct *sigh = current->sighand; + + if ((unshare_flags & CLONE_SIGHAND) && atomic_read(&sigh->count) > 1) + return -EINVAL; + else + return 0; +} + +/* + * Unshare vm if it is being shared + */ +static int unshare_vm(unsigned long unshare_flags, struct mm_struct **new_mmp) +{ + struct mm_struct *mm = current->mm; + + if ((unshare_flags & CLONE_VM) && + (mm && atomic_read(&mm->mm_users) > 1)) { + return -EINVAL; + } + + return 0; +} + +/* + * Unshare file descriptor table if it is being shared + */ +static int unshare_fd(unsigned long unshare_flags, struct files_struct **new_fdp) +{ + struct files_struct *fd = current->files; + int error = 0; + + if ((unshare_flags & CLONE_FILES) && + (fd && atomic_read(&fd->count) > 1)) { + *new_fdp = dup_fd(fd, &error); + if (!*new_fdp) + return error; + } + + return 0; +} + +/* + * unshare allows a process to 'unshare' part of the process + * context which was originally shared using clone. copy_* + * functions used by do_fork() cannot be used here directly + * because they modify an inactive task_struct that is being + * constructed. Here we are modifying the current, active, + * task_struct. + */ +SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags) +{ + int err = 0; + struct fs_struct *fs, *new_fs = NULL; + struct sighand_struct *new_sigh = NULL; + struct mm_struct *mm, *new_mm = NULL, *active_mm = NULL; + struct files_struct *fd, *new_fd = NULL; + struct nsproxy *new_nsproxy = NULL; + int do_sysvsem = 0; + + check_unshare_flags(&unshare_flags); + + /* Return -EINVAL for all unsupported flags */ + err = -EINVAL; + if (unshare_flags & ~(CLONE_THREAD|CLONE_FS|CLONE_NEWNS|CLONE_SIGHAND| + CLONE_VM|CLONE_FILES|CLONE_SYSVSEM| + CLONE_NEWUTS|CLONE_NEWIPC|CLONE_NEWNET)) + goto bad_unshare_out; + + /* + * CLONE_NEWIPC must also detach from the undolist: after switching + * to a new ipc namespace, the semaphore arrays from the old + * namespace are unreachable. + */ + if (unshare_flags & (CLONE_NEWIPC|CLONE_SYSVSEM)) + do_sysvsem = 1; + if ((err = unshare_thread(unshare_flags))) + goto bad_unshare_out; + if ((err = unshare_fs(unshare_flags, &new_fs))) + goto bad_unshare_cleanup_thread; + if ((err = unshare_sighand(unshare_flags, &new_sigh))) + goto bad_unshare_cleanup_fs; + if ((err = unshare_vm(unshare_flags, &new_mm))) + goto bad_unshare_cleanup_sigh; + if ((err = unshare_fd(unshare_flags, &new_fd))) + goto bad_unshare_cleanup_vm; + if ((err = unshare_nsproxy_namespaces(unshare_flags, &new_nsproxy, + new_fs))) + goto bad_unshare_cleanup_fd; + + if (new_fs || new_mm || new_fd || do_sysvsem || new_nsproxy) { + if (do_sysvsem) { + /* + * CLONE_SYSVSEM is equivalent to sys_exit(). 
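+ *
+ * For illustration, a user-space call such as
+ *
+ *	if (unshare(CLONE_NEWIPC) == -1)
+ *		perror("unshare");
+ *
+ * takes this branch too: the semaphore arrays of the old namespace
+ * become unreachable, so the undo list is dropped as on exit.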
+ */
+ exit_sem(current);
+ }
+
+ if (new_nsproxy) {
+ switch_task_namespaces(current, new_nsproxy);
+ new_nsproxy = NULL;
+ }
+
+ task_lock(current);
+
+ if (new_fs) {
+ fs = current->fs;
+ write_lock(&fs->lock);
+ current->fs = new_fs;
+ if (--fs->users)
+ new_fs = NULL;
+ else
+ new_fs = fs;
+ write_unlock(&fs->lock);
+ }
+
+ if (new_mm) {
+ mm = current->mm;
+ active_mm = current->active_mm;
+ current->mm = new_mm;
+ current->active_mm = new_mm;
+ activate_mm(active_mm, new_mm);
+ new_mm = mm;
+ }
+
+ if (new_fd) {
+ fd = current->files;
+ current->files = new_fd;
+ new_fd = fd;
+ }
+
+ task_unlock(current);
+ }
+
+ if (new_nsproxy)
+ put_nsproxy(new_nsproxy);
+
+bad_unshare_cleanup_fd:
+ if (new_fd)
+ put_files_struct(new_fd);
+
+bad_unshare_cleanup_vm:
+ if (new_mm)
+ mmput(new_mm);
+
+bad_unshare_cleanup_sigh:
+ if (new_sigh)
+ if (atomic_dec_and_test(&new_sigh->count))
+ kmem_cache_free(sighand_cachep, new_sigh);
+
+bad_unshare_cleanup_fs:
+ if (new_fs)
+ free_fs_struct(new_fs);
+
+bad_unshare_cleanup_thread:
+bad_unshare_out:
+ return err;
+}
+
+/*
+ * Helper to unshare the files of the current task.
+ * We don't want to expose copy_files internals to
+ * the exec layer of the kernel.
+ */
+
+int unshare_files(struct files_struct **displaced)
+{
+ struct task_struct *task = current;
+ struct files_struct *copy = NULL;
+ int error;
+
+ error = unshare_fd(CLONE_FILES, &copy);
+ if (error || !copy) {
+ *displaced = NULL;
+ return error;
+ }
+ *displaced = task->files;
+ task_lock(task);
+ task->files = copy;
+ task_unlock(task);
+ return 0;
+}
diff -Nur kernel-2.6.32.54/linux-2.6.32/kernel/kthread.c kernel-2.6.32.54.vs/linux-2.6.32/kernel/kthread.c
--- kernel-2.6.32.54/linux-2.6.32/kernel/kthread.c 2012-01-16 15:01:39.904725462 +0100
+++ kernel-2.6.32.54.vs/linux-2.6.32/kernel/kthread.c 2012-01-16 14:51:22.005408393 +0100
@@ -14,6 +14,7 @@
 #include
 #include
 #include
+#include
 #include
 static DEFINE_SPINLOCK(kthread_create_lock);
diff -Nur kernel-2.6.32.54/linux-2.6.32/kernel/Makefile kernel-2.6.32.54.vs/linux-2.6.32/kernel/Makefile
--- kernel-2.6.32.54/linux-2.6.32/kernel/Makefile 2009-12-03 04:51:21.000000000 +0100
+++ kernel-2.6.32.54.vs/linux-2.6.32/kernel/Makefile 2012-01-16 14:51:22.001408407 +0100
@@ -23,6 +23,7 @@
 CFLAGS_REMOVE_sched_clock.o = -pg
 endif
+obj-y += vserver/
 obj-$(CONFIG_FREEZER) += freezer.o
 obj-$(CONFIG_PROFILING) += profile.o
 obj-$(CONFIG_SYSCTL_SYSCALL_CHECK) += sysctl_check.o
diff -Nur kernel-2.6.32.54/linux-2.6.32/kernel/nsproxy.c kernel-2.6.32.54.vs/linux-2.6.32/kernel/nsproxy.c
--- kernel-2.6.32.54/linux-2.6.32/kernel/nsproxy.c 2009-12-03 04:51:21.000000000 +0100
+++ kernel-2.6.32.54.vs/linux-2.6.32/kernel/nsproxy.c 2012-01-16 14:51:22.005408393 +0100
@@ -19,6 +19,8 @@
 #include
 #include
 #include
+#include
+#include
 #include
 #include
@@ -31,8 +33,11 @@
 struct nsproxy *nsproxy;
 nsproxy = kmem_cache_alloc(nsproxy_cachep, GFP_KERNEL);
- if (nsproxy)
+ if (nsproxy) {
 atomic_set(&nsproxy->count, 1);
+ atomic_inc(&vs_global_nsproxy);
+ }
+ vxdprintk(VXD_CBIT(space, 2), "create_nsproxy = %p[1]", nsproxy);
 return nsproxy;
 }
@@ -41,41 +46,52 @@
 * Return the newly created nsproxy. Do not attach this to the task,
 * leave it to the caller to do proper locking and attach it to task.
*/ -static struct nsproxy *create_new_namespaces(unsigned long flags, - struct task_struct *tsk, struct fs_struct *new_fs) +static struct nsproxy *unshare_namespaces(unsigned long flags, + struct nsproxy *orig, struct fs_struct *new_fs) { struct nsproxy *new_nsp; int err; + vxdprintk(VXD_CBIT(space, 4), + "unshare_namespaces(0x%08lx,%p,%p)", + flags, orig, new_fs); + new_nsp = create_nsproxy(); if (!new_nsp) return ERR_PTR(-ENOMEM); - new_nsp->mnt_ns = copy_mnt_ns(flags, tsk->nsproxy->mnt_ns, new_fs); + new_nsp->mnt_ns = copy_mnt_ns(flags, orig->mnt_ns, new_fs); if (IS_ERR(new_nsp->mnt_ns)) { err = PTR_ERR(new_nsp->mnt_ns); goto out_ns; } - new_nsp->uts_ns = copy_utsname(flags, tsk->nsproxy->uts_ns); + new_nsp->uts_ns = copy_utsname(flags, orig->uts_ns); if (IS_ERR(new_nsp->uts_ns)) { err = PTR_ERR(new_nsp->uts_ns); goto out_uts; } - new_nsp->ipc_ns = copy_ipcs(flags, tsk->nsproxy->ipc_ns); + new_nsp->ipc_ns = copy_ipcs(flags, orig->ipc_ns); if (IS_ERR(new_nsp->ipc_ns)) { err = PTR_ERR(new_nsp->ipc_ns); goto out_ipc; } - new_nsp->pid_ns = copy_pid_ns(flags, task_active_pid_ns(tsk)); + new_nsp->pid_ns = copy_pid_ns(flags, orig->pid_ns); if (IS_ERR(new_nsp->pid_ns)) { err = PTR_ERR(new_nsp->pid_ns); goto out_pid; } - new_nsp->net_ns = copy_net_ns(flags, tsk->nsproxy->net_ns); + /* disabled now? + new_nsp->user_ns = copy_user_ns(flags, orig->user_ns); + if (IS_ERR(new_nsp->user_ns)) { + err = PTR_ERR(new_nsp->user_ns); + goto out_user; + } */ + + new_nsp->net_ns = copy_net_ns(flags, orig->net_ns); if (IS_ERR(new_nsp->net_ns)) { err = PTR_ERR(new_nsp->net_ns); goto out_net; @@ -100,6 +116,38 @@ return ERR_PTR(err); } +static struct nsproxy *create_new_namespaces(int flags, struct task_struct *tsk, + struct fs_struct *new_fs) +{ + return unshare_namespaces(flags, tsk->nsproxy, new_fs); +} + +/* + * copies the nsproxy, setting refcount to 1, and grabbing a + * reference to all contained namespaces. + */ +struct nsproxy *copy_nsproxy(struct nsproxy *orig) +{ + struct nsproxy *ns = create_nsproxy(); + + if (ns) { + memcpy(ns, orig, sizeof(struct nsproxy)); + atomic_set(&ns->count, 1); + + if (ns->mnt_ns) + get_mnt_ns(ns->mnt_ns); + if (ns->uts_ns) + get_uts_ns(ns->uts_ns); + if (ns->ipc_ns) + get_ipc_ns(ns->ipc_ns); + if (ns->pid_ns) + get_pid_ns(ns->pid_ns); + if (ns->net_ns) + get_net(ns->net_ns); + } + return ns; +} + /* * called from clone. This now handles copy for nsproxy and all * namespaces therein. 
@@ -107,9 +155,12 @@ int copy_namespaces(unsigned long flags, struct task_struct *tsk) { struct nsproxy *old_ns = tsk->nsproxy; - struct nsproxy *new_ns; + struct nsproxy *new_ns = NULL; int err = 0; + vxdprintk(VXD_CBIT(space, 7), "copy_namespaces(0x%08lx,%p[%p])", + flags, tsk, old_ns); + if (!old_ns) return 0; @@ -119,7 +170,7 @@ CLONE_NEWPID | CLONE_NEWNET))) return 0; - if (!capable(CAP_SYS_ADMIN)) { + if (!vx_can_unshare(CAP_SYS_ADMIN, flags)) { err = -EPERM; goto out; } @@ -146,6 +197,9 @@ out: put_nsproxy(old_ns); + vxdprintk(VXD_CBIT(space, 3), + "copy_namespaces(0x%08lx,%p[%p]) = %d [%p]", + flags, tsk, old_ns, err, new_ns); return err; } @@ -159,7 +213,9 @@ put_ipc_ns(ns->ipc_ns); if (ns->pid_ns) put_pid_ns(ns->pid_ns); - put_net(ns->net_ns); + if (ns->net_ns) + put_net(ns->net_ns); + atomic_dec(&vs_global_nsproxy); kmem_cache_free(nsproxy_cachep, ns); } @@ -172,11 +228,15 @@ { int err = 0; + vxdprintk(VXD_CBIT(space, 4), + "unshare_nsproxy_namespaces(0x%08lx,[%p])", + unshare_flags, current->nsproxy); + if (!(unshare_flags & (CLONE_NEWNS | CLONE_NEWUTS | CLONE_NEWIPC | CLONE_NEWNET))) return 0; - if (!capable(CAP_SYS_ADMIN)) + if (!vx_can_unshare(CAP_SYS_ADMIN, unshare_flags)) return -EPERM; *new_nsp = create_new_namespaces(unshare_flags, current, diff -Nur kernel-2.6.32.54/linux-2.6.32/kernel/pid.c kernel-2.6.32.54.vs/linux-2.6.32/kernel/pid.c --- kernel-2.6.32.54/linux-2.6.32/kernel/pid.c 2012-01-16 15:01:40.284724117 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/kernel/pid.c 2012-01-16 14:51:22.005408393 +0100 @@ -36,6 +36,7 @@ #include #include #include +#include #define pid_hashfn(nr, ns) \ hash_long((unsigned long)nr + (unsigned long)ns, pidhash_shift) @@ -308,7 +309,7 @@ struct pid *find_vpid(int nr) { - return find_pid_ns(nr, current->nsproxy->pid_ns); + return find_pid_ns(vx_rmap_pid(nr), current->nsproxy->pid_ns); } EXPORT_SYMBOL_GPL(find_vpid); @@ -368,6 +369,9 @@ struct task_struct *pid_task(struct pid *pid, enum pid_type type) { struct task_struct *result = NULL; + + if (type == PIDTYPE_REALPID) + type = PIDTYPE_PID; if (pid) { struct hlist_node *first; first = rcu_dereference(pid->tasks[type].first); @@ -383,7 +387,7 @@ */ struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns) { - return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID); + return pid_task(find_pid_ns(vx_rmap_pid(nr), ns), PIDTYPE_PID); } EXPORT_SYMBOL_GPL(find_task_by_pid_ns); @@ -426,7 +430,7 @@ } EXPORT_SYMBOL_GPL(find_get_pid); -pid_t pid_nr_ns(struct pid *pid, struct pid_namespace *ns) +pid_t pid_unmapped_nr_ns(struct pid *pid, struct pid_namespace *ns) { struct upid *upid; pid_t nr = 0; @@ -439,6 +443,11 @@ return nr; } +pid_t pid_nr_ns(struct pid *pid, struct pid_namespace *ns) +{ + return vx_map_pid(pid_unmapped_nr_ns(pid, ns)); +} + pid_t pid_vnr(struct pid *pid) { return pid_nr_ns(pid, current->nsproxy->pid_ns); diff -Nur kernel-2.6.32.54/linux-2.6.32/kernel/pid.c.orig kernel-2.6.32.54.vs/linux-2.6.32/kernel/pid.c.orig --- kernel-2.6.32.54/linux-2.6.32/kernel/pid.c.orig 2012-01-16 15:01:39.908725448 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/kernel/pid.c.orig 2012-01-16 14:47:19.478254844 +0100 @@ -385,6 +385,7 @@ { return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID); } +EXPORT_SYMBOL_GPL(find_task_by_pid_ns); struct task_struct *find_task_by_vpid(pid_t vnr) { diff -Nur kernel-2.6.32.54/linux-2.6.32/kernel/pid_namespace.c kernel-2.6.32.54.vs/linux-2.6.32/kernel/pid_namespace.c --- kernel-2.6.32.54/linux-2.6.32/kernel/pid_namespace.c 2009-12-03 04:51:21.000000000 +0100 +++ 
kernel-2.6.32.54.vs/linux-2.6.32/kernel/pid_namespace.c 2012-01-16 14:51:22.005408393 +0100 @@ -13,6 +13,7 @@ #include #include #include +#include #define BITS_PER_PAGE (PAGE_SIZE*8) @@ -86,6 +87,7 @@ goto out_free_map; kref_init(&ns->kref); + atomic_inc(&vs_global_pid_ns); ns->level = level; ns->parent = get_pid_ns(parent_pid_ns); @@ -111,6 +113,7 @@ for (i = 0; i < PIDMAP_ENTRIES; i++) kfree(ns->pidmap[i].page); + atomic_dec(&vs_global_pid_ns); kmem_cache_free(pid_ns_cachep, ns); } diff -Nur kernel-2.6.32.54/linux-2.6.32/kernel/posix-timers.c kernel-2.6.32.54.vs/linux-2.6.32/kernel/posix-timers.c --- kernel-2.6.32.54/linux-2.6.32/kernel/posix-timers.c 2012-01-16 15:01:39.908725448 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/kernel/posix-timers.c 2012-01-16 14:51:22.013408365 +0100 @@ -46,6 +46,7 @@ #include #include #include +#include /* * Management arrays for POSIX timers. Timers are kept in slab memory @@ -363,6 +364,7 @@ { struct task_struct *task; int shared, ret = -1; + /* * FIXME: if ->sigq is queued we can race with * dequeue_signal()->do_schedule_next_timer(). @@ -379,10 +381,18 @@ rcu_read_lock(); task = pid_task(timr->it_pid, PIDTYPE_PID); if (task) { + struct vx_info_save vxis; + struct vx_info *vxi; + + vxi = get_vx_info(task->vx_info); + enter_vx_info(vxi, &vxis); shared = !(timr->it_sigev_notify & SIGEV_THREAD_ID); ret = send_sigqueue(timr->sigq, task, shared); + leave_vx_info(&vxis); + put_vx_info(vxi); } rcu_read_unlock(); + /* If we failed to send the signal the timer stops. */ return ret > 0; } diff -Nur kernel-2.6.32.54/linux-2.6.32/kernel/printk.c kernel-2.6.32.54.vs/linux-2.6.32/kernel/printk.c --- kernel-2.6.32.54/linux-2.6.32/kernel/printk.c 2012-01-16 15:01:40.324723975 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/kernel/printk.c 2012-01-16 14:51:22.013408365 +0100 @@ -34,6 +34,7 @@ #include #include #include +#include #include @@ -278,18 +279,13 @@ unsigned i, j, limit, count; int do_clear = 0; char c; - int error = 0; + int error; error = security_syslog(type); if (error) return error; - switch (type) { - case 0: /* Close log */ - break; - case 1: /* Open log */ - break; - case 2: /* Read from log */ + if ((type >= 2) && (type <= 4)) { error = -EINVAL; if (!buf || len < 0) goto out; @@ -300,6 +296,16 @@ error = -EFAULT; goto out; } + } + if (!vx_check(0, VS_ADMIN|VS_WATCH)) + return vx_do_syslog(type, buf, len); + + switch (type) { + case 0: /* Close log */ + break; + case 1: /* Open log */ + break; + case 2: /* Read from log */ error = wait_event_interruptible(log_wait, (log_start - log_end)); if (error) @@ -324,16 +330,6 @@ do_clear = 1; /* FALL THRU */ case 3: /* Read last kernel messages */ - error = -EINVAL; - if (!buf || len < 0) - goto out; - error = 0; - if (!len) - goto out; - if (!access_ok(VERIFY_WRITE, buf, len)) { - error = -EFAULT; - goto out; - } count = len; if (count > log_buf_len) count = log_buf_len; diff -Nur kernel-2.6.32.54/linux-2.6.32/kernel/printk.c.orig kernel-2.6.32.54.vs/linux-2.6.32/kernel/printk.c.orig --- kernel-2.6.32.54/linux-2.6.32/kernel/printk.c.orig 1970-01-01 01:00:00.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/kernel/printk.c.orig 2012-01-16 14:47:19.534254649 +0100 @@ -0,0 +1,1413 @@ +/* + * linux/kernel/printk.c + * + * Copyright (C) 1991, 1992 Linus Torvalds + * + * Modified to make sys_syslog() more flexible: added commands to + * return the last 4k of kernel messages, regardless of whether + * they've been read or not. Added option to suppress kernel printk's + * to the console. 
Added hook for sending the console messages + * elsewhere, in preparation for a serial line console (someday). + * Ted Ts'o, 2/11/93. + * Modified for sysctl support, 1/8/97, Chris Horn. + * Fixed SMP synchronization, 08/08/99, Manfred Spraul + * manfred@colorfullife.com + * Rewrote bits to get rid of console_lock + * 01Mar01 Andrew Morton + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include /* For in_interrupt() */ +#include +#include +#include +#include +#include +#include +#include + +#include + +/* + * for_each_console() allows you to iterate on each console + */ +#define for_each_console(con) \ + for (con = console_drivers; con != NULL; con = con->next) + +/* + * Architectures can override it: + */ +void asmlinkage __attribute__((weak)) early_printk(const char *fmt, ...) +{ +} + +#define __LOG_BUF_LEN (1 << CONFIG_LOG_BUF_SHIFT) + +/* printk's without a loglevel use this.. */ +#define DEFAULT_MESSAGE_LOGLEVEL 4 /* KERN_WARNING */ + +/* We show everything that is MORE important than this.. */ +#define MINIMUM_CONSOLE_LOGLEVEL 1 /* Minimum loglevel we let people use */ +#define DEFAULT_CONSOLE_LOGLEVEL 7 /* anything MORE serious than KERN_DEBUG */ + +DECLARE_WAIT_QUEUE_HEAD(log_wait); + +int console_printk[4] = { + DEFAULT_CONSOLE_LOGLEVEL, /* console_loglevel */ + DEFAULT_MESSAGE_LOGLEVEL, /* default_message_loglevel */ + MINIMUM_CONSOLE_LOGLEVEL, /* minimum_console_loglevel */ + DEFAULT_CONSOLE_LOGLEVEL, /* default_console_loglevel */ +}; +EXPORT_SYMBOL_GPL(console_printk); + +static int saved_console_loglevel = -1; + +/* + * Low level drivers may need that to know if they can schedule in + * their unblank() callback or not. So let's export it. + */ +int oops_in_progress; +EXPORT_SYMBOL(oops_in_progress); + +/* + * console_sem protects the console_drivers list, and also + * provides serialisation for access to the entire console + * driver system. + */ +static DECLARE_MUTEX(console_sem); +struct console *console_drivers; +EXPORT_SYMBOL_GPL(console_drivers); + +/* + * This is used for debugging the mess that is the VT code by + * keeping track if we have the console semaphore held. It's + * definitely not the perfect debug tool (we don't know if _WE_ + * hold it are racing, but it helps tracking those weird code + * path in the console code where we end up in places I want + * locked without the console sempahore held + */ +static int console_locked, console_suspended; + +/* + * logbuf_lock protects log_buf, log_start, log_end, con_start and logged_chars + * It is also used in interesting ways to provide interlocking in + * release_console_sem(). + */ +static DEFINE_SPINLOCK(logbuf_lock); + +#define LOG_BUF_MASK (log_buf_len-1) +#define LOG_BUF(idx) (log_buf[(idx) & LOG_BUF_MASK]) + +/* + * The indices into log_buf are not constrained to log_buf_len - they + * must be masked before subscripting + */ +static unsigned log_start; /* Index into log_buf: next char to be read by syslog() */ +static unsigned con_start; /* Index into log_buf: next char to be sent to consoles */ +static unsigned log_end; /* Index into log_buf: most-recently-written-char + 1 */ + +/* + * Array of consoles built from command line options (console=) + */ +struct console_cmdline +{ + char name[8]; /* Name of the driver */ + int index; /* Minor dev. 
to use */ + char *options; /* Options for the driver */ +#ifdef CONFIG_A11Y_BRAILLE_CONSOLE + char *brl_options; /* Options for braille driver */ +#endif +}; + +#define MAX_CMDLINECONSOLES 8 + +static struct console_cmdline console_cmdline[MAX_CMDLINECONSOLES]; +static int selected_console = -1; +static int preferred_console = -1; +int console_set_on_cmdline; +EXPORT_SYMBOL(console_set_on_cmdline); + +/* Flag: console code may call schedule() */ +static int console_may_schedule; + +#ifdef CONFIG_PRINTK + +static char __log_buf[__LOG_BUF_LEN]; +static char *log_buf = __log_buf; +static int log_buf_len = __LOG_BUF_LEN; +static unsigned logged_chars; /* Number of chars produced since last read+clear operation */ + +#ifdef CONFIG_KEXEC +/* + * This appends the listed symbols to /proc/vmcoreinfo + * + * /proc/vmcoreinfo is used by various utiilties, like crash and makedumpfile to + * obtain access to symbols that are otherwise very difficult to locate. These + * symbols are specifically used so that utilities can access and extract the + * dmesg log from a vmcore file after a crash. + */ +void log_buf_kexec_setup(void) +{ + VMCOREINFO_SYMBOL(log_buf); + VMCOREINFO_SYMBOL(log_end); + VMCOREINFO_SYMBOL(log_buf_len); + VMCOREINFO_SYMBOL(logged_chars); +} +#endif + +static int __init log_buf_len_setup(char *str) +{ + unsigned size = memparse(str, &str); + unsigned long flags; + + if (size) + size = roundup_pow_of_two(size); + if (size > log_buf_len) { + unsigned start, dest_idx, offset; + char *new_log_buf; + + new_log_buf = alloc_bootmem(size); + if (!new_log_buf) { + printk(KERN_WARNING "log_buf_len: allocation failed\n"); + goto out; + } + + spin_lock_irqsave(&logbuf_lock, flags); + log_buf_len = size; + log_buf = new_log_buf; + + offset = start = min(con_start, log_start); + dest_idx = 0; + while (start != log_end) { + log_buf[dest_idx] = __log_buf[start & (__LOG_BUF_LEN - 1)]; + start++; + dest_idx++; + } + log_start -= offset; + con_start -= offset; + log_end -= offset; + spin_unlock_irqrestore(&logbuf_lock, flags); + + printk(KERN_NOTICE "log_buf_len: %d\n", log_buf_len); + } +out: + return 1; +} + +__setup("log_buf_len=", log_buf_len_setup); + +#ifdef CONFIG_BOOT_PRINTK_DELAY + +static unsigned int boot_delay; /* msecs delay after each printk during bootup */ +static unsigned long long loops_per_msec; /* based on boot_delay */ + +static int __init boot_delay_setup(char *str) +{ + unsigned long lpj; + + lpj = preset_lpj ? preset_lpj : 1000000; /* some guess */ + loops_per_msec = (unsigned long long)lpj / 1000 * HZ; + + get_option(&str, &boot_delay); + if (boot_delay > 10 * 1000) + boot_delay = 0; + + pr_debug("boot_delay: %u, preset_lpj: %ld, lpj: %lu, " + "HZ: %d, loops_per_msec: %llu\n", + boot_delay, preset_lpj, lpj, HZ, loops_per_msec); + return 1; +} +__setup("boot_delay=", boot_delay_setup); + +static void boot_delay_msec(void) +{ + unsigned long long k; + unsigned long timeout; + + if (boot_delay == 0 || system_state != SYSTEM_BOOTING) + return; + + k = (unsigned long long)loops_per_msec * boot_delay; + + timeout = jiffies + msecs_to_jiffies(boot_delay); + while (k) { + k--; + cpu_relax(); + /* + * use (volatile) jiffies to prevent + * compiler reduction; loop termination via jiffies + * is secondary and may or may not happen. + */ + if (time_after(jiffies, timeout)) + break; + touch_nmi_watchdog(); + } +} +#else +static inline void boot_delay_msec(void) +{ +} +#endif + +/* + * Commands to do_syslog: + * + * 0 -- Close the log. Currently a NOP. + * 1 -- Open the log. Currently a NOP. 
+ * 2 -- Read from the log. + * 3 -- Read all messages remaining in the ring buffer. + * 4 -- Read and clear all messages remaining in the ring buffer + * 5 -- Clear ring buffer. + * 6 -- Disable printk's to console + * 7 -- Enable printk's to console + * 8 -- Set level of messages printed to console + * 9 -- Return number of unread characters in the log buffer + * 10 -- Return size of the log buffer + */ +int do_syslog(int type, char __user *buf, int len) +{ + unsigned i, j, limit, count; + int do_clear = 0; + char c; + int error = 0; + + error = security_syslog(type); + if (error) + return error; + + switch (type) { + case 0: /* Close log */ + break; + case 1: /* Open log */ + break; + case 2: /* Read from log */ + error = -EINVAL; + if (!buf || len < 0) + goto out; + error = 0; + if (!len) + goto out; + if (!access_ok(VERIFY_WRITE, buf, len)) { + error = -EFAULT; + goto out; + } + error = wait_event_interruptible(log_wait, + (log_start - log_end)); + if (error) + goto out; + i = 0; + spin_lock_irq(&logbuf_lock); + while (!error && (log_start != log_end) && i < len) { + c = LOG_BUF(log_start); + log_start++; + spin_unlock_irq(&logbuf_lock); + error = __put_user(c,buf); + buf++; + i++; + cond_resched(); + spin_lock_irq(&logbuf_lock); + } + spin_unlock_irq(&logbuf_lock); + if (!error) + error = i; + break; + case 4: /* Read/clear last kernel messages */ + do_clear = 1; + /* FALL THRU */ + case 3: /* Read last kernel messages */ + error = -EINVAL; + if (!buf || len < 0) + goto out; + error = 0; + if (!len) + goto out; + if (!access_ok(VERIFY_WRITE, buf, len)) { + error = -EFAULT; + goto out; + } + count = len; + if (count > log_buf_len) + count = log_buf_len; + spin_lock_irq(&logbuf_lock); + if (count > logged_chars) + count = logged_chars; + if (do_clear) + logged_chars = 0; + limit = log_end; + /* + * __put_user() could sleep, and while we sleep + * printk() could overwrite the messages + * we try to copy to user space. Therefore + * the messages are copied in reverse. + */ + for (i = 0; i < count && !error; i++) { + j = limit-1-i; + if (j + log_buf_len < log_end) + break; + c = LOG_BUF(j); + spin_unlock_irq(&logbuf_lock); + error = __put_user(c,&buf[count-1-i]); + cond_resched(); + spin_lock_irq(&logbuf_lock); + } + spin_unlock_irq(&logbuf_lock); + if (error) + break; + error = i; + if (i != count) { + int offset = count-error; + /* buffer overflow during copy, correct user buffer. 
*/ + for (i = 0; i < error; i++) { + if (__get_user(c,&buf[i+offset]) || + __put_user(c,&buf[i])) { + error = -EFAULT; + break; + } + cond_resched(); + } + } + break; + case 5: /* Clear ring buffer */ + logged_chars = 0; + break; + case 6: /* Disable logging to console */ + if (saved_console_loglevel == -1) + saved_console_loglevel = console_loglevel; + console_loglevel = minimum_console_loglevel; + break; + case 7: /* Enable logging to console */ + if (saved_console_loglevel != -1) { + console_loglevel = saved_console_loglevel; + saved_console_loglevel = -1; + } + break; + case 8: /* Set level of messages printed to console */ + error = -EINVAL; + if (len < 1 || len > 8) + goto out; + if (len < minimum_console_loglevel) + len = minimum_console_loglevel; + console_loglevel = len; + /* Implicitly re-enable logging to console */ + saved_console_loglevel = -1; + error = 0; + break; + case 9: /* Number of chars in the log buffer */ + error = log_end - log_start; + break; + case 10: /* Size of the log buffer */ + error = log_buf_len; + break; + default: + error = -EINVAL; + break; + } +out: + return error; +} + +SYSCALL_DEFINE3(syslog, int, type, char __user *, buf, int, len) +{ + return do_syslog(type, buf, len); +} + +/* + * Call the console drivers on a range of log_buf + */ +static void __call_console_drivers(unsigned start, unsigned end) +{ + struct console *con; + + for_each_console(con) { + if ((con->flags & CON_ENABLED) && con->write && + (cpu_online(smp_processor_id()) || + (con->flags & CON_ANYTIME))) + con->write(con, &LOG_BUF(start), end - start); + } +} + +static int __read_mostly ignore_loglevel; + +static int __init ignore_loglevel_setup(char *str) +{ + ignore_loglevel = 1; + printk(KERN_INFO "debug: ignoring loglevel setting.\n"); + + return 0; +} + +early_param("ignore_loglevel", ignore_loglevel_setup); + +/* + * Write out chars from start to end - 1 inclusive + */ +static void _call_console_drivers(unsigned start, + unsigned end, int msg_log_level) +{ + if ((msg_log_level < console_loglevel || ignore_loglevel) && + console_drivers && start != end) { + if ((start & LOG_BUF_MASK) > (end & LOG_BUF_MASK)) { + /* wrapped write */ + __call_console_drivers(start & LOG_BUF_MASK, + log_buf_len); + __call_console_drivers(0, end & LOG_BUF_MASK); + } else { + __call_console_drivers(start, end); + } + } +} + +/* + * Call the console drivers, asking them to write out + * log_buf[start] to log_buf[end - 1]. + * The console_sem must be held. + */ +static void call_console_drivers(unsigned start, unsigned end) +{ + unsigned cur_index, start_print; + static int msg_level = -1; + + BUG_ON(((int)(start - end)) > 0); + + cur_index = start; + start_print = start; + while (cur_index != end) { + if (msg_level < 0 && ((end - cur_index) > 2) && + LOG_BUF(cur_index + 0) == '<' && + LOG_BUF(cur_index + 1) >= '0' && + LOG_BUF(cur_index + 1) <= '7' && + LOG_BUF(cur_index + 2) == '>') { + msg_level = LOG_BUF(cur_index + 1) - '0'; + cur_index += 3; + start_print = cur_index; + } + while (cur_index != end) { + char c = LOG_BUF(cur_index); + + cur_index++; + if (c == '\n') { + if (msg_level < 0) { + /* + * printk() has already given us loglevel tags in + * the buffer. 
This code is here in case the + * log buffer has wrapped right round and scribbled + * on those tags + */ + msg_level = default_message_loglevel; + } + _call_console_drivers(start_print, cur_index, msg_level); + msg_level = -1; + start_print = cur_index; + break; + } + } + } + _call_console_drivers(start_print, end, msg_level); +} + +static void emit_log_char(char c) +{ + LOG_BUF(log_end) = c; + log_end++; + if (log_end - log_start > log_buf_len) + log_start = log_end - log_buf_len; + if (log_end - con_start > log_buf_len) + con_start = log_end - log_buf_len; + if (logged_chars < log_buf_len) + logged_chars++; +} + +/* + * Zap console related locks when oopsing. Only zap at most once + * every 10 seconds, to leave time for slow consoles to print a + * full oops. + */ +static void zap_locks(void) +{ + static unsigned long oops_timestamp; + + if (time_after_eq(jiffies, oops_timestamp) && + !time_after(jiffies, oops_timestamp + 30 * HZ)) + return; + + oops_timestamp = jiffies; + + /* If a crash is occurring, make sure we can't deadlock */ + spin_lock_init(&logbuf_lock); + /* And make sure that we print immediately */ + init_MUTEX(&console_sem); +} + +#if defined(CONFIG_PRINTK_TIME) +static int printk_time = 1; +#else +static int printk_time = 0; +#endif +module_param_named(time, printk_time, bool, S_IRUGO | S_IWUSR); + +/* Check if we have any console registered that can be called early in boot. */ +static int have_callable_console(void) +{ + struct console *con; + + for_each_console(con) + if (con->flags & CON_ANYTIME) + return 1; + + return 0; +} + +/** + * printk - print a kernel message + * @fmt: format string + * + * This is printk(). It can be called from any context. We want it to work. + * + * We try to grab the console_sem. If we succeed, it's easy - we log the output and + * call the console drivers. If we fail to get the semaphore we place the output + * into the log buffer and return. The current holder of the console_sem will + * notice the new output in release_console_sem() and will send it to the + * consoles before releasing the semaphore. + * + * One effect of this deferred printing is that code which calls printk() and + * then changes console_loglevel may break. This is because console_loglevel + * is inspected when the actual printing occurs. + * + * See also: + * printf(3) + * + * See the vsnprintf() documentation for format string extensions over C99. + */ + +asmlinkage int printk(const char *fmt, ...) +{ + va_list args; + int r; + + va_start(args, fmt); + r = vprintk(fmt, args); + va_end(args); + + return r; +} + +/* cpu currently holding logbuf_lock */ +static volatile unsigned int printk_cpu = UINT_MAX; + +/* + * Can we actually use the console at this time on this cpu? + * + * Console drivers may assume that per-cpu resources have + * been allocated. So unless they're explicitly marked as + * being able to cope (CON_ANYTIME) don't call them until + * this CPU is officially up. + */ +static inline int can_use_console(unsigned int cpu) +{ + return cpu_online(cpu) || have_callable_console(); +} + +/* + * Try to get console ownership to actually show the kernel + * messages from a 'printk'. Return true (and with the + * console_semaphore held, and 'console_locked' set) if it + * is successful, false otherwise. + * + * This gets called with the 'logbuf_lock' spinlock held and + * interrupts disabled. It should return with 'lockbuf_lock' + * released but interrupts still disabled. 
+ */ +static int acquire_console_semaphore_for_printk(unsigned int cpu) +{ + int retval = 0; + + if (!try_acquire_console_sem()) { + retval = 1; + + /* + * If we can't use the console, we need to release + * the console semaphore by hand to avoid flushing + * the buffer. We need to hold the console semaphore + * in order to do this test safely. + */ + if (!can_use_console(cpu)) { + console_locked = 0; + up(&console_sem); + retval = 0; + } + } + printk_cpu = UINT_MAX; + spin_unlock(&logbuf_lock); + return retval; +} +static const char recursion_bug_msg [] = + KERN_CRIT "BUG: recent printk recursion!\n"; +static int recursion_bug; +static int new_text_line = 1; +static char printk_buf[1024]; + +int printk_delay_msec __read_mostly; + +static inline void printk_delay(void) +{ + if (unlikely(printk_delay_msec)) { + int m = printk_delay_msec; + + while (m--) { + mdelay(1); + touch_nmi_watchdog(); + } + } +} + +asmlinkage int vprintk(const char *fmt, va_list args) +{ + int printed_len = 0; + int current_log_level = default_message_loglevel; + unsigned long flags; + int this_cpu; + char *p; + + boot_delay_msec(); + printk_delay(); + + preempt_disable(); + /* This stops the holder of console_sem just where we want him */ + raw_local_irq_save(flags); + this_cpu = smp_processor_id(); + + /* + * Ouch, printk recursed into itself! + */ + if (unlikely(printk_cpu == this_cpu)) { + /* + * If a crash is occurring during printk() on this CPU, + * then try to get the crash message out but make sure + * we can't deadlock. Otherwise just return to avoid the + * recursion and return - but flag the recursion so that + * it can be printed at the next appropriate moment: + */ + if (!oops_in_progress) { + recursion_bug = 1; + goto out_restore_irqs; + } + zap_locks(); + } + + lockdep_off(); + spin_lock(&logbuf_lock); + printk_cpu = this_cpu; + + if (recursion_bug) { + recursion_bug = 0; + strcpy(printk_buf, recursion_bug_msg); + printed_len = strlen(recursion_bug_msg); + } + /* Emit the output into the temporary buffer */ + printed_len += vscnprintf(printk_buf + printed_len, + sizeof(printk_buf) - printed_len, fmt, args); + + + p = printk_buf; + + /* Do we have a loglevel in the string? */ + if (p[0] == '<') { + unsigned char c = p[1]; + if (c && p[2] == '>') { + switch (c) { + case '0' ... '7': /* loglevel */ + current_log_level = c - '0'; + /* Fallthrough - make sure we're on a new line */ + case 'd': /* KERN_DEFAULT */ + if (!new_text_line) { + emit_log_char('\n'); + new_text_line = 1; + } + /* Fallthrough - skip the loglevel */ + case 'c': /* KERN_CONT */ + p += 3; + break; + } + } + } + + /* + * Copy the output into log_buf. If the caller didn't provide + * appropriate log level tags, we insert them here + */ + for ( ; *p; p++) { + if (new_text_line) { + /* Always output the token */ + emit_log_char('<'); + emit_log_char(current_log_level + '0'); + emit_log_char('>'); + printed_len += 3; + new_text_line = 0; + + if (printk_time) { + /* Follow the token with the time */ + char tbuf[50], *tp; + unsigned tlen; + unsigned long long t; + unsigned long nanosec_rem; + + t = cpu_clock(printk_cpu); + nanosec_rem = do_div(t, 1000000000); + tlen = sprintf(tbuf, "[%5lu.%06lu] ", + (unsigned long) t, + nanosec_rem / 1000); + + for (tp = tbuf; tp < tbuf + tlen; tp++) + emit_log_char(*tp); + printed_len += tlen; + } + + if (!*p) + break; + } + + emit_log_char(*p); + if (*p == '\n') + new_text_line = 1; + } + + /* + * Try to acquire and then immediately release the + * console semaphore. 
The release will do all the + * actual magic (print out buffers, wake up klogd, + * etc). + * + * The acquire_console_semaphore_for_printk() function + * will release 'logbuf_lock' regardless of whether it + * actually gets the semaphore or not. + */ + if (acquire_console_semaphore_for_printk(this_cpu)) + release_console_sem(); + + lockdep_on(); +out_restore_irqs: + raw_local_irq_restore(flags); + + preempt_enable(); + return printed_len; +} +EXPORT_SYMBOL(printk); +EXPORT_SYMBOL(vprintk); + +#else + +static void call_console_drivers(unsigned start, unsigned end) +{ +} + +#endif + +static int __add_preferred_console(char *name, int idx, char *options, + char *brl_options) +{ + struct console_cmdline *c; + int i; + + /* + * See if this tty is not yet registered, and + * if we have a slot free. + */ + for (i = 0; i < MAX_CMDLINECONSOLES && console_cmdline[i].name[0]; i++) + if (strcmp(console_cmdline[i].name, name) == 0 && + console_cmdline[i].index == idx) { + if (!brl_options) + selected_console = i; + return 0; + } + if (i == MAX_CMDLINECONSOLES) + return -E2BIG; + if (!brl_options) + selected_console = i; + c = &console_cmdline[i]; + strlcpy(c->name, name, sizeof(c->name)); + c->options = options; +#ifdef CONFIG_A11Y_BRAILLE_CONSOLE + c->brl_options = brl_options; +#endif + c->index = idx; + return 0; +} +/* + * Set up a list of consoles. Called from init/main.c + */ +static int __init console_setup(char *str) +{ + char buf[sizeof(console_cmdline[0].name) + 4]; /* 4 for index */ + char *s, *options, *brl_options = NULL; + int idx; + +#ifdef CONFIG_A11Y_BRAILLE_CONSOLE + if (!memcmp(str, "brl,", 4)) { + brl_options = ""; + str += 4; + } else if (!memcmp(str, "brl=", 4)) { + brl_options = str + 4; + str = strchr(brl_options, ','); + if (!str) { + printk(KERN_ERR "need port name after brl=\n"); + return 1; + } + *(str++) = 0; + } +#endif + + /* + * Decode str into name, index, options. + */ + if (str[0] >= '0' && str[0] <= '9') { + strcpy(buf, "ttyS"); + strncpy(buf + 4, str, sizeof(buf) - 5); + } else { + strncpy(buf, str, sizeof(buf) - 1); + } + buf[sizeof(buf) - 1] = 0; + if ((options = strchr(str, ',')) != NULL) + *(options++) = 0; +#ifdef __sparc__ + if (!strcmp(str, "ttya")) + strcpy(buf, "ttyS0"); + if (!strcmp(str, "ttyb")) + strcpy(buf, "ttyS1"); +#endif + for (s = buf; *s; s++) + if ((*s >= '0' && *s <= '9') || *s == ',') + break; + idx = simple_strtoul(s, NULL, 10); + *s = 0; + + __add_preferred_console(buf, idx, options, brl_options); + console_set_on_cmdline = 1; + return 1; +} +__setup("console=", console_setup); + +/** + * add_preferred_console - add a device to the list of preferred consoles. + * @name: device name + * @idx: device index + * @options: options for this console + * + * The last preferred console added will be used for kernel messages + * and stdin/out/err for init. Normally this is used by console_setup + * above to handle user-supplied console arguments; however it can also + * be used by arch-specific code either to override the user or more + * commonly to provide a default console (ie from PROM variables) when + * the user has not supplied one. 
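+ *
+ * Illustrative call (equivalent in effect to "console=ttyS0,115200n8"
+ * on the command line, assuming a serial driver named "ttyS"):
+ *
+ *	add_preferred_console("ttyS", 0, "115200n8");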
+ */ +int add_preferred_console(char *name, int idx, char *options) +{ + return __add_preferred_console(name, idx, options, NULL); +} + +int update_console_cmdline(char *name, int idx, char *name_new, int idx_new, char *options) +{ + struct console_cmdline *c; + int i; + + for (i = 0; i < MAX_CMDLINECONSOLES && console_cmdline[i].name[0]; i++) + if (strcmp(console_cmdline[i].name, name) == 0 && + console_cmdline[i].index == idx) { + c = &console_cmdline[i]; + strlcpy(c->name, name_new, sizeof(c->name)); + c->name[sizeof(c->name) - 1] = 0; + c->options = options; + c->index = idx_new; + return i; + } + /* not found */ + return -1; +} + +int console_suspend_enabled = 1; +EXPORT_SYMBOL(console_suspend_enabled); + +static int __init console_suspend_disable(char *str) +{ + console_suspend_enabled = 0; + return 1; +} +__setup("no_console_suspend", console_suspend_disable); + +/** + * suspend_console - suspend the console subsystem + * + * This disables printk() while we go into suspend states + */ +void suspend_console(void) +{ + if (!console_suspend_enabled) + return; + printk("Suspending console(s) (use no_console_suspend to debug)\n"); + acquire_console_sem(); + console_suspended = 1; + up(&console_sem); +} +EXPORT_SYMBOL_GPL(suspend_console); + +void resume_console(void) +{ + if (!console_suspend_enabled) + return; + down(&console_sem); + console_suspended = 0; + release_console_sem(); +} +EXPORT_SYMBOL_GPL(resume_console); + +/** + * acquire_console_sem - lock the console system for exclusive use. + * + * Acquires a semaphore which guarantees that the caller has + * exclusive access to the console system and the console_drivers list. + * + * Can sleep, returns nothing. + */ +void acquire_console_sem(void) +{ + BUG_ON(in_interrupt()); + down(&console_sem); + if (console_suspended) + return; + console_locked = 1; + console_may_schedule = 1; +} +EXPORT_SYMBOL(acquire_console_sem); + +int try_acquire_console_sem(void) +{ + if (down_trylock(&console_sem)) + return -1; + if (console_suspended) { + up(&console_sem); + return -1; + } + console_locked = 1; + console_may_schedule = 0; + return 0; +} +EXPORT_SYMBOL(try_acquire_console_sem); + +int is_console_locked(void) +{ + return console_locked; +} + +static DEFINE_PER_CPU(int, printk_pending); + +void printk_tick(void) +{ + if (__get_cpu_var(printk_pending)) { + __get_cpu_var(printk_pending) = 0; + wake_up_interruptible(&log_wait); + } +} + +int printk_needs_cpu(int cpu) +{ + if (unlikely(cpu_is_offline(cpu))) + printk_tick(); + return per_cpu(printk_pending, cpu); +} + +void wake_up_klogd(void) +{ + if (waitqueue_active(&log_wait)) + __raw_get_cpu_var(printk_pending) = 1; +} + +/** + * release_console_sem - unlock the console system + * + * Releases the semaphore which the caller holds on the console system + * and the console driver list. + * + * While the semaphore was held, console output may have been buffered + * by printk(). If this is the case, release_console_sem() emits + * the output prior to releasing the semaphore. + * + * If there is output waiting for klogd, we wake it up. + * + * release_console_sem() may be called from any context. 
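+ *
+ * A typical caller (sketch; compare console_stop() below) brackets a
+ * console state change with the semaphore:
+ *
+ *	acquire_console_sem();
+ *	console->flags &= ~CON_ENABLED;
+ *	release_console_sem();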
+ */ +void release_console_sem(void) +{ + unsigned long flags; + unsigned _con_start, _log_end; + unsigned wake_klogd = 0; + + if (console_suspended) { + up(&console_sem); + return; + } + + console_may_schedule = 0; + + for ( ; ; ) { + spin_lock_irqsave(&logbuf_lock, flags); + wake_klogd |= log_start - log_end; + if (con_start == log_end) + break; /* Nothing to print */ + _con_start = con_start; + _log_end = log_end; + con_start = log_end; /* Flush */ + spin_unlock(&logbuf_lock); + stop_critical_timings(); /* don't trace print latency */ + call_console_drivers(_con_start, _log_end); + start_critical_timings(); + local_irq_restore(flags); + } + console_locked = 0; + up(&console_sem); + spin_unlock_irqrestore(&logbuf_lock, flags); + if (wake_klogd) + wake_up_klogd(); +} +EXPORT_SYMBOL(release_console_sem); + +/** + * console_conditional_schedule - yield the CPU if required + * + * If the console code is currently allowed to sleep, and + * if this CPU should yield the CPU to another task, do + * so here. + * + * Must be called within acquire_console_sem(). + */ +void __sched console_conditional_schedule(void) +{ + if (console_may_schedule) + cond_resched(); +} +EXPORT_SYMBOL(console_conditional_schedule); + +void console_unblank(void) +{ + struct console *c; + + /* + * console_unblank can no longer be called in interrupt context unless + * oops_in_progress is set to 1.. + */ + if (oops_in_progress) { + if (down_trylock(&console_sem) != 0) + return; + } else + acquire_console_sem(); + + console_locked = 1; + console_may_schedule = 0; + for_each_console(c) + if ((c->flags & CON_ENABLED) && c->unblank) + c->unblank(); + release_console_sem(); +} + +/* + * Return the console tty driver structure and its associated index + */ +struct tty_driver *console_device(int *index) +{ + struct console *c; + struct tty_driver *driver = NULL; + + acquire_console_sem(); + for_each_console(c) { + if (!c->device) + continue; + driver = c->device(c, index); + if (driver) + break; + } + release_console_sem(); + return driver; +} + +/* + * Prevent further output on the passed console device so that (for example) + * serial drivers can disable console output before suspending a port, and can + * re-enable output afterwards. + */ +void console_stop(struct console *console) +{ + acquire_console_sem(); + console->flags &= ~CON_ENABLED; + release_console_sem(); +} +EXPORT_SYMBOL(console_stop); + +void console_start(struct console *console) +{ + acquire_console_sem(); + console->flags |= CON_ENABLED; + release_console_sem(); +} +EXPORT_SYMBOL(console_start); + +/* + * The console driver calls this routine during kernel initialization + * to register the console printing procedure with printk() and to + * print any messages that were printed by the kernel before the + * console driver was initialized. + * + * This can happen pretty early during the boot process (because of + * early_printk) - sometimes before setup_arch() completes - be careful + * of what kernel features are used - they may not be initialised yet. + * + * There are two types of consoles - bootconsoles (early_printk) and + * "real" consoles (everything which is not a bootconsole) which are + * handled differently. + * - Any number of bootconsoles can be registered at any time. + * - As soon as a "real" console is registered, all bootconsoles + * will be unregistered automatically. 
+ * - Once a "real" console is registered, any attempt to register a + * bootconsoles will be rejected + */ +void register_console(struct console *newcon) +{ + int i; + unsigned long flags; + struct console *bcon = NULL; + + /* + * before we register a new CON_BOOT console, make sure we don't + * already have a valid console + */ + if (console_drivers && newcon->flags & CON_BOOT) { + /* find the last or real console */ + for_each_console(bcon) { + if (!(bcon->flags & CON_BOOT)) { + printk(KERN_INFO "Too late to register bootconsole %s%d\n", + newcon->name, newcon->index); + return; + } + } + } + + if (console_drivers && console_drivers->flags & CON_BOOT) + bcon = console_drivers; + + if (preferred_console < 0 || bcon || !console_drivers) + preferred_console = selected_console; + + if (newcon->early_setup) + newcon->early_setup(); + + /* + * See if we want to use this console driver. If we + * didn't select a console we take the first one + * that registers here. + */ + if (preferred_console < 0) { + if (newcon->index < 0) + newcon->index = 0; + if (newcon->setup == NULL || + newcon->setup(newcon, NULL) == 0) { + newcon->flags |= CON_ENABLED; + if (newcon->device) { + newcon->flags |= CON_CONSDEV; + preferred_console = 0; + } + } + } + + /* + * See if this console matches one we selected on + * the command line. + */ + for (i = 0; i < MAX_CMDLINECONSOLES && console_cmdline[i].name[0]; + i++) { + if (strcmp(console_cmdline[i].name, newcon->name) != 0) + continue; + if (newcon->index >= 0 && + newcon->index != console_cmdline[i].index) + continue; + if (newcon->index < 0) + newcon->index = console_cmdline[i].index; +#ifdef CONFIG_A11Y_BRAILLE_CONSOLE + if (console_cmdline[i].brl_options) { + newcon->flags |= CON_BRL; + braille_register_console(newcon, + console_cmdline[i].index, + console_cmdline[i].options, + console_cmdline[i].brl_options); + return; + } +#endif + if (newcon->setup && + newcon->setup(newcon, console_cmdline[i].options) != 0) + break; + newcon->flags |= CON_ENABLED; + newcon->index = console_cmdline[i].index; + if (i == selected_console) { + newcon->flags |= CON_CONSDEV; + preferred_console = selected_console; + } + break; + } + + if (!(newcon->flags & CON_ENABLED)) + return; + + /* + * If we have a bootconsole, and are switching to a real console, + * don't print everything out again, since when the boot console, and + * the real console are the same physical device, it's annoying to + * see the beginning boot messages twice + */ + if (bcon && ((newcon->flags & (CON_CONSDEV | CON_BOOT)) == CON_CONSDEV)) + newcon->flags &= ~CON_PRINTBUFFER; + + /* + * Put this console in the list - keep the + * preferred driver at the head of the list. + */ + acquire_console_sem(); + if ((newcon->flags & CON_CONSDEV) || console_drivers == NULL) { + newcon->next = console_drivers; + console_drivers = newcon; + if (newcon->next) + newcon->next->flags &= ~CON_CONSDEV; + } else { + newcon->next = console_drivers->next; + console_drivers->next = newcon; + } + if (newcon->flags & CON_PRINTBUFFER) { + /* + * release_console_sem() will print out the buffered messages + * for us. 
+ */ + spin_lock_irqsave(&logbuf_lock, flags); + con_start = log_start; + spin_unlock_irqrestore(&logbuf_lock, flags); + } + release_console_sem(); + + /* + * By unregistering the bootconsoles after we enable the real console + * we get the "console xxx enabled" message on all the consoles - + * boot consoles, real consoles, etc - this is to ensure that end + * users know there might be something in the kernel's log buffer that + * went to the bootconsole (that they do not see on the real console) + */ + if (bcon && ((newcon->flags & (CON_CONSDEV | CON_BOOT)) == CON_CONSDEV)) { + /* we need to iterate through twice, to make sure we print + * everything out, before we unregister the console(s) + */ + printk(KERN_INFO "console [%s%d] enabled, bootconsole disabled\n", + newcon->name, newcon->index); + for_each_console(bcon) + if (bcon->flags & CON_BOOT) + unregister_console(bcon); + } else { + printk(KERN_INFO "%sconsole [%s%d] enabled\n", + (newcon->flags & CON_BOOT) ? "boot" : "" , + newcon->name, newcon->index); + } +} +EXPORT_SYMBOL(register_console); + +int unregister_console(struct console *console) +{ + struct console *a, *b; + int res = 1; + +#ifdef CONFIG_A11Y_BRAILLE_CONSOLE + if (console->flags & CON_BRL) + return braille_unregister_console(console); +#endif + + acquire_console_sem(); + if (console_drivers == console) { + console_drivers=console->next; + res = 0; + } else if (console_drivers) { + for (a=console_drivers->next, b=console_drivers ; + a; b=a, a=b->next) { + if (a == console) { + b->next = a->next; + res = 0; + break; + } + } + } + + /* + * If this isn't the last console and it has CON_CONSDEV set, we + * need to set it on the next preferred console. + */ + if (console_drivers != NULL && console->flags & CON_CONSDEV) + console_drivers->flags |= CON_CONSDEV; + + release_console_sem(); + return res; +} +EXPORT_SYMBOL(unregister_console); + +static int __init disable_boot_consoles(void) +{ + struct console *con; + + for_each_console(con) { + if (con->flags & CON_BOOT) { + printk(KERN_INFO "turn off boot console %s%d\n", + con->name, con->index); + unregister_console(con); + } + } + return 0; +} +late_initcall(disable_boot_consoles); + +#if defined CONFIG_PRINTK + +/* + * printk rate limiting, lifted from the networking subsystem. + * + * This enforces a rate limit: not more than 10 kernel messages + * every 5s to make a denial-of-service attack impossible. + */ +DEFINE_RATELIMIT_STATE(printk_ratelimit_state, 5 * HZ, 10); + +int printk_ratelimit(void) +{ + return __ratelimit(&printk_ratelimit_state); +} +EXPORT_SYMBOL(printk_ratelimit); + +/** + * printk_timed_ratelimit - caller-controlled printk ratelimiting + * @caller_jiffies: pointer to caller's state + * @interval_msecs: minimum interval between prints + * + * printk_timed_ratelimit() returns true if more than @interval_msecs + * milliseconds have elapsed since the last time printk_timed_ratelimit() + * returned true. 
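+ *
+ * Illustrative usage, printing at most once every five seconds per
+ * call site:
+ *
+ *	static unsigned long last;
+ *
+ *	if (printk_timed_ratelimit(&last, 5000))
+ *		printk(KERN_WARNING "mydrv: still waiting for device\n");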
+ */ +bool printk_timed_ratelimit(unsigned long *caller_jiffies, + unsigned int interval_msecs) +{ + if (*caller_jiffies == 0 + || !time_in_range(jiffies, *caller_jiffies, + *caller_jiffies + + msecs_to_jiffies(interval_msecs))) { + *caller_jiffies = jiffies; + return true; + } + return false; +} +EXPORT_SYMBOL(printk_timed_ratelimit); +#endif diff -Nur kernel-2.6.32.54/linux-2.6.32/kernel/ptrace.c kernel-2.6.32.54.vs/linux-2.6.32/kernel/ptrace.c --- kernel-2.6.32.54/linux-2.6.32/kernel/ptrace.c 2012-01-16 15:01:39.916725420 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/kernel/ptrace.c 2012-01-16 14:51:22.013408365 +0100 @@ -22,6 +22,7 @@ #include #include #include +#include /* @@ -151,6 +152,11 @@ dumpable = get_dumpable(task->mm); if (!dumpable && !capable(CAP_SYS_PTRACE)) return -EPERM; + if (!vx_check(task->xid, VS_ADMIN_P|VS_IDENT)) + return -EPERM; + if (!vx_check(task->xid, VS_IDENT) && + !task_vx_flags(task, VXF_STATE_ADMIN, 0)) + return -EACCES; return security_ptrace_access_check(task, mode); } diff -Nur kernel-2.6.32.54/linux-2.6.32/kernel/sched.c kernel-2.6.32.54.vs/linux-2.6.32/kernel/sched.c --- kernel-2.6.32.54/linux-2.6.32/kernel/sched.c 2012-01-16 15:01:39.924725391 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/kernel/sched.c 2012-01-16 14:51:22.017408351 +0100 @@ -71,6 +71,8 @@ #include #include #include +#include +#include #include #include @@ -237,6 +239,15 @@ #include +#if defined(CONFIG_FAIR_GROUP_SCHED) && defined(CONFIG_CFS_HARD_LIMITS) +struct cfs_bandwidth { + spinlock_t cfs_runtime_lock; + ktime_t cfs_period; + u64 cfs_runtime; + struct hrtimer cfs_period_timer; +}; +#endif + struct cfs_rq; static LIST_HEAD(task_groups); @@ -251,6 +262,9 @@ /* runqueue "owned" by this group on each cpu */ struct cfs_rq **cfs_rq; unsigned long shares; +#ifdef CONFIG_CFS_HARD_LIMITS + struct cfs_bandwidth cfs_bandwidth; +#endif #endif #ifdef CONFIG_RT_GROUP_SCHED @@ -404,6 +418,19 @@ unsigned long rq_weight; #endif #endif +#ifdef CONFIG_CFS_HARD_LIMITS + /* set when the group is throttled on this cpu */ + int cfs_throttled; + + /* runtime currently consumed by the group on this rq */ + u64 cfs_time; + + /* runtime available to the group on this rq */ + u64 cfs_runtime; + + /* Protects the cfs runtime related fields of this cfs_rq */ + spinlock_t cfs_runtime_lock; +#endif }; /* Real-Time classes' related field in a runqueue: */ @@ -1586,6 +1613,7 @@ } } +static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq); /* * Re-compute the task group their per cpu shares over the given domain. * This needs to be done in a bottom-up fashion because the rq weight of a @@ -1614,8 +1642,10 @@ * If there are currently no tasks on the cpu pretend there * is one of average load so that when a new task gets to * run here it will not get delayed by group starvation. + * Also if the group is throttled on this cpu, pretend that + * it has no tasks. 
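+ * (Editor's note: a throttled cfs_rq therefore contributes the same + * NICE_0_LOAD placeholder weight as an idle one, so the share + * redistribution below is not skewed by its temporarily empty rq.)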
*/ - if (!weight) + if (!weight || cfs_rq_throttled(tg->cfs_rq[i])) weight = NICE_0_LOAD; sum_weight += weight; @@ -1792,6 +1822,175 @@ static void calc_load_account_active(struct rq *this_rq); static void update_sysctl(void); + +#if defined(CONFIG_RT_GROUP_SCHED) || defined(CONFIG_FAIR_GROUP_SCHED) + +#ifdef CONFIG_SMP +static inline const struct cpumask *sched_bw_period_mask(void) +{ + return cpu_rq(smp_processor_id())->rd->span; +} +#else /* !CONFIG_SMP */ +static inline const struct cpumask *sched_bw_period_mask(void) +{ + return cpu_online_mask; +} +#endif /* CONFIG_SMP */ + +#else +static inline const struct cpumask *sched_bw_period_mask(void) +{ + return cpu_online_mask; +} + +#endif + +#ifdef CONFIG_FAIR_GROUP_SCHED +#ifdef CONFIG_CFS_HARD_LIMITS + +/* + * Runtime allowed for a cfs group before it is hard limited. + * default: infinite, which means no hard limiting. + */ +u64 sched_cfs_runtime = RUNTIME_INF; + +/* + * Period over which we hard limit the cfs group's bandwidth. + * default: 0.5s + */ +u64 sched_cfs_period = 500000; + +static inline u64 global_cfs_period(void) +{ + return sched_cfs_period * NSEC_PER_USEC; +} + +static inline u64 global_cfs_runtime(void) +{ + return RUNTIME_INF; +} + +void do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b); + +static inline void cfs_rq_runtime_lock(struct cfs_rq *cfs_rq) +{ + spin_lock(&cfs_rq->cfs_runtime_lock); +} + +static inline void cfs_rq_runtime_unlock(struct cfs_rq *cfs_rq) +{ + spin_unlock(&cfs_rq->cfs_runtime_lock); +} + +/* + * Refresh the runtimes of the throttled groups. + * Nothing much to do here yet; later patches will fill this in. + */ +static enum hrtimer_restart sched_cfs_period_timer(struct hrtimer *timer) +{ + struct cfs_bandwidth *cfs_b = + container_of(timer, struct cfs_bandwidth, cfs_period_timer); + + do_sched_cfs_period_timer(cfs_b); + hrtimer_add_expires_ns(timer, ktime_to_ns(cfs_b->cfs_period)); + return HRTIMER_RESTART; +} + +/* + * TODO: Check whether this kind of timer setup is sufficient for cfs, + * or whether we should do what rt does.
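+ * + * Editor's note: the timer is self-rearming - sched_cfs_period_timer() + * above advances its own expiry by cfs_period and returns + * HRTIMER_RESTART - so this function only needs to start it once.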
+ */ +static void start_cfs_bandwidth(struct task_group *tg) +{ + struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth; + + /* + * The timer isn't set up for groups with infinite runtime + */ + if (cfs_b->cfs_runtime == RUNTIME_INF) + return; + + if (hrtimer_active(&cfs_b->cfs_period_timer)) + return; + + hrtimer_start_range_ns(&cfs_b->cfs_period_timer, cfs_b->cfs_period, + 0, HRTIMER_MODE_REL); +} + +static void init_cfs_bandwidth(struct task_group *tg) +{ + struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth; + + cfs_b->cfs_period = ns_to_ktime(global_cfs_period()); + cfs_b->cfs_runtime = global_cfs_runtime(); + + spin_lock_init(&cfs_b->cfs_runtime_lock); + + hrtimer_init(&cfs_b->cfs_period_timer, + CLOCK_MONOTONIC, HRTIMER_MODE_REL); + cfs_b->cfs_period_timer.function = &sched_cfs_period_timer; +} + +static inline void destroy_cfs_bandwidth(struct task_group *tg) +{ + hrtimer_cancel(&tg->cfs_bandwidth.cfs_period_timer); +} + +static void init_cfs_hard_limits(struct cfs_rq *cfs_rq, struct task_group *tg) +{ + cfs_rq->cfs_time = 0; + cfs_rq->cfs_throttled = 0; + cfs_rq->cfs_runtime = tg->cfs_bandwidth.cfs_runtime; + spin_lock_init(&cfs_rq->cfs_runtime_lock); +} + +#else /* !CONFIG_CFS_HARD_LIMITS */ + +static void init_cfs_bandwidth(struct task_group *tg) +{ + return; +} + +static inline void destroy_cfs_bandwidth(struct task_group *tg) +{ + return; +} + +static void init_cfs_hard_limits(struct cfs_rq *cfs_rq, struct task_group *tg) +{ + return; +} + +static inline void cfs_rq_runtime_lock(struct cfs_rq *cfs_rq) +{ + return; +} + +static inline void cfs_rq_runtime_unlock(struct cfs_rq *cfs_rq) +{ + return; +} + +#endif /* CONFIG_CFS_HARD_LIMITS */ +#else /* !CONFIG_FAIR_GROUP_SCHED */ + +static inline void cfs_rq_runtime_lock(struct cfs_rq *cfs_rq) +{ + return; +} + +static inline void cfs_rq_runtime_unlock(struct cfs_rq *cfs_rq) +{ + return; +} + +static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq) +{ + return 0; +} + +#endif /* CONFIG_FAIR_GROUP_SCHED */ + static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu) { set_task_rq(p, cpu); @@ -3129,9 +3328,17 @@ */ void get_avenrun(unsigned long *loads, unsigned long offset, int shift) { - loads[0] = (avenrun[0] + offset) << shift; - loads[1] = (avenrun[1] + offset) << shift; - loads[2] = (avenrun[2] + offset) << shift; + if (vx_flags(VXF_VIRT_LOAD, 0)) { + struct vx_info *vxi = current_vx_info(); + + loads[0] = (vxi->cvirt.load[0] + offset) << shift; + loads[1] = (vxi->cvirt.load[1] + offset) << shift; + loads[2] = (vxi->cvirt.load[2] + offset) << shift; + } else { + loads[0] = (avenrun[0] + offset) << shift; + loads[1] = (avenrun[1] + offset) << shift; + loads[2] = (avenrun[2] + offset) << shift; + } } static unsigned long @@ -5245,16 +5452,19 @@ cputime_t cputime_scaled) { struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat; + struct vx_info *vxi = p->vx_info; /* p is _always_ current */ cputime64_t tmp; + int nice = (TASK_NICE(p) > 0); /* Add user time to process. */ p->utime = cputime_add(p->utime, cputime); p->utimescaled = cputime_add(p->utimescaled, cputime_scaled); + vx_account_user(vxi, cputime, nice); account_group_user_time(p, cputime); /* Add user time to cpustat.
*/ tmp = cputime_to_cputime64(cputime); - if (TASK_NICE(p) > 0) + if (nice) cpustat->nice = cputime64_add(cpustat->nice, tmp); else cpustat->user = cputime64_add(cpustat->user, tmp); @@ -5300,6 +5510,7 @@ cputime_t cputime, cputime_t cputime_scaled) { struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat; + struct vx_info *vxi = p->vx_info; /* p is _always_ current */ cputime64_t tmp; if ((p->flags & PF_VCPU) && (irq_count() - hardirq_offset == 0)) { @@ -5310,6 +5521,7 @@ /* Add system time to process. */ p->stime = cputime_add(p->stime, cputime); p->stimescaled = cputime_add(p->stimescaled, cputime_scaled); + vx_account_system(vxi, cputime, 0 /* do we have idle time? */); account_group_system_time(p, cputime); /* Add system time to cpustat. */ @@ -6393,7 +6605,7 @@ nice = 19; if (increment < 0 && !can_nice(current, nice)) - return -EPERM; + return vx_flags(VXF_IGNEG_NICE, 0) ? 0 : -EPERM; retval = security_task_setnice(current, nice); if (retval) @@ -9470,6 +9682,32 @@ } #endif +#ifdef CONFIG_SMP +static void disable_runtime(struct rq *rq) +{ + unsigned long flags; + + spin_lock_irqsave(&rq->lock, flags); +#if defined(CONFIG_FAIR_GROUP_SCHED) && defined(CONFIG_CFS_HARD_LIMITS) + disable_runtime_cfs(rq); +#endif + disable_runtime_rt(rq); + spin_unlock_irqrestore(&rq->lock, flags); +} + +static void enable_runtime(struct rq *rq) +{ + unsigned long flags; + + spin_lock_irqsave(&rq->lock, flags); +#if defined(CONFIG_FAIR_GROUP_SCHED) && defined(CONFIG_CFS_HARD_LIMITS) + enable_runtime_cfs(rq); +#endif + enable_runtime_rt(rq); + spin_unlock_irqrestore(&rq->lock, flags); +} +#endif + static int update_runtime(struct notifier_block *nfb, unsigned long action, void *hcpu) { @@ -9602,6 +9840,7 @@ struct rq *rq = cpu_rq(cpu); tg->cfs_rq[cpu] = cfs_rq; init_cfs_rq(cfs_rq, rq); + init_cfs_hard_limits(cfs_rq, tg); cfs_rq->tg = tg; if (add) list_add(&cfs_rq->leaf_cfs_rq_list, &rq->leaf_cfs_rq_list); @@ -9710,6 +9949,10 @@ global_rt_period(), global_rt_runtime()); #endif /* CONFIG_RT_GROUP_SCHED */ +#ifdef CONFIG_FAIR_GROUP_SCHED + init_cfs_bandwidth(&init_task_group); +#endif + #ifdef CONFIG_CGROUP_SCHED list_add(&init_task_group.list, &task_groups); INIT_LIST_HEAD(&init_task_group.children); @@ -9731,6 +9974,7 @@ init_cfs_rq(&rq->cfs, rq); init_rt_rq(&rq->rt, rq); #ifdef CONFIG_FAIR_GROUP_SCHED + init_cfs_hard_limits(&rq->cfs, &init_task_group); init_task_group.shares = init_task_group_load; INIT_LIST_HEAD(&rq->leaf_cfs_rq_list); #ifdef CONFIG_CGROUP_SCHED @@ -9992,6 +10236,7 @@ { int i; + destroy_cfs_bandwidth(tg); for_each_possible_cpu(i) { if (tg->cfs_rq) kfree(tg->cfs_rq[i]); @@ -10018,6 +10263,7 @@ if (!tg->se) goto err; + init_cfs_bandwidth(tg); tg->shares = NICE_0_LOAD; for_each_possible_cpu(i) { @@ -10734,6 +10980,100 @@ return (u64) tg->shares; } + +#ifdef CONFIG_CFS_HARD_LIMITS + +static int tg_set_cfs_bandwidth(struct task_group *tg, + u64 cfs_period, u64 cfs_runtime) +{ + int i; + + spin_lock_irq(&tg->cfs_bandwidth.cfs_runtime_lock); + tg->cfs_bandwidth.cfs_period = ns_to_ktime(cfs_period); + tg->cfs_bandwidth.cfs_runtime = cfs_runtime; + + for_each_possible_cpu(i) { + struct cfs_rq *cfs_rq = tg->cfs_rq[i]; + + cfs_rq_runtime_lock(cfs_rq); + cfs_rq->cfs_runtime = cfs_runtime; + cfs_rq_runtime_unlock(cfs_rq); + } + + start_cfs_bandwidth(tg); + spin_unlock_irq(&tg->cfs_bandwidth.cfs_runtime_lock); + return 0; +} + +int tg_set_cfs_runtime(struct task_group *tg, long cfs_runtime_us) +{ + u64 cfs_runtime, cfs_period; + + cfs_period = ktime_to_ns(tg->cfs_bandwidth.cfs_period); + cfs_runtime = 
(u64)cfs_runtime_us * NSEC_PER_USEC; + if (cfs_runtime_us < 0) + cfs_runtime = RUNTIME_INF; + + return tg_set_cfs_bandwidth(tg, cfs_period, cfs_runtime); +} + +long tg_get_cfs_runtime(struct task_group *tg) +{ + u64 cfs_runtime_us; + + if (tg->cfs_bandwidth.cfs_runtime == RUNTIME_INF) + return -1; + + cfs_runtime_us = tg->cfs_bandwidth.cfs_runtime; + do_div(cfs_runtime_us, NSEC_PER_USEC); + return cfs_runtime_us; +} + +int tg_set_cfs_period(struct task_group *tg, long cfs_period_us) +{ + u64 cfs_runtime, cfs_period; + + cfs_period = (u64)cfs_period_us * NSEC_PER_USEC; + cfs_runtime = tg->cfs_bandwidth.cfs_runtime; + + if (cfs_period == 0) + return -EINVAL; + + return tg_set_cfs_bandwidth(tg, cfs_period, cfs_runtime); +} + +long tg_get_cfs_period(struct task_group *tg) +{ + u64 cfs_period_us; + + cfs_period_us = ktime_to_ns(tg->cfs_bandwidth.cfs_period); + do_div(cfs_period_us, NSEC_PER_USEC); + return cfs_period_us; +} + +static s64 cpu_cfs_runtime_read_s64(struct cgroup *cgrp, struct cftype *cft) +{ + return tg_get_cfs_runtime(cgroup_tg(cgrp)); +} + +static int cpu_cfs_runtime_write_s64(struct cgroup *cgrp, struct cftype *cftype, + s64 cfs_runtime_us) +{ + return tg_set_cfs_runtime(cgroup_tg(cgrp), cfs_runtime_us); +} + +static u64 cpu_cfs_period_read_u64(struct cgroup *cgrp, struct cftype *cft) +{ + return tg_get_cfs_period(cgroup_tg(cgrp)); +} + +static int cpu_cfs_period_write_u64(struct cgroup *cgrp, struct cftype *cftype, + u64 cfs_period_us) +{ + return tg_set_cfs_period(cgroup_tg(cgrp), cfs_period_us); +} + +#endif /* CONFIG_CFS_HARD_LIMITS */ #endif /* CONFIG_FAIR_GROUP_SCHED */ #ifdef CONFIG_RT_GROUP_SCHED @@ -10767,6 +11107,18 @@ .read_u64 = cpu_shares_read_u64, .write_u64 = cpu_shares_write_u64, }, +#ifdef CONFIG_CFS_HARD_LIMITS + { + .name = "cfs_runtime_us", + .read_s64 = cpu_cfs_runtime_read_s64, + .write_s64 = cpu_cfs_runtime_write_s64, + }, + { + .name = "cfs_period_us", + .read_u64 = cpu_cfs_period_read_u64, + .write_u64 = cpu_cfs_period_write_u64, + }, +#endif /* CONFIG_CFS_HARD_LIMITS */ #endif #ifdef CONFIG_RT_GROUP_SCHED { diff -Nur kernel-2.6.32.54/linux-2.6.32/kernel/sched_debug.c kernel-2.6.32.54.vs/linux-2.6.32/kernel/sched_debug.c --- kernel-2.6.32.54/linux-2.6.32/kernel/sched_debug.c 2012-01-16 15:01:39.924725391 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/kernel/sched_debug.c 2012-01-16 14:51:22.021408337 +0100 @@ -80,6 +80,11 @@ PN(se->wait_max); PN(se->wait_sum); P(se->wait_count); +#ifdef CONFIG_CFS_HARD_LIMITS + PN(se->throttle_max); + PN(se->throttle_sum); + P(se->throttle_count); +#endif #endif P(se->load.weight); #undef PN @@ -209,6 +214,16 @@ #ifdef CONFIG_SMP SEQ_printf(m, " .%-30s: %lu\n", "shares", cfs_rq->shares); #endif +#ifdef CONFIG_CFS_HARD_LIMITS + spin_lock_irqsave(&rq->lock, flags); + SEQ_printf(m, " .%-30s: %d\n", "cfs_throttled", + cfs_rq->cfs_throttled); + SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "cfs_time", + SPLIT_NS(cfs_rq->cfs_time)); + SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "cfs_runtime", + SPLIT_NS(cfs_rq->cfs_runtime)); + spin_unlock_irqrestore(&rq->lock, flags); +#endif /* CONFIG_CFS_HARD_LIMITS */ print_cfs_group_stats(m, cpu, cfs_rq->tg); #endif } @@ -309,7 +324,7 @@ u64 now = ktime_to_ns(ktime_get()); int cpu; - SEQ_printf(m, "Sched Debug Version: v0.09, %s %.*s\n", + SEQ_printf(m, "Sched Debug Version: v0.10, %s %.*s\n", init_utsname()->release, (int)strcspn(init_utsname()->version, " "), init_utsname()->version); diff -Nur kernel-2.6.32.54/linux-2.6.32/kernel/sched_fair.c kernel-2.6.32.54.vs/linux-2.6.32/kernel/sched_fair.c --- 
kernel-2.6.32.54/linux-2.6.32/kernel/sched_fair.c 2012-01-16 15:01:39.928725377 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/kernel/sched_fair.c 2012-01-16 14:51:22.021408337 +0100 @@ -192,7 +192,308 @@ } } -#else /* !CONFIG_FAIR_GROUP_SCHED */ +#ifdef CONFIG_CFS_HARD_LIMITS + +static inline void update_stats_throttle_start(struct cfs_rq *cfs_rq, + struct sched_entity *se) +{ + schedstat_set(se->throttle_start, rq_of(cfs_rq)->clock); +} + +static inline void update_stats_throttle_end(struct cfs_rq *cfs_rq, + struct sched_entity *se) +{ + schedstat_set(se->throttle_max, max(se->throttle_max, + rq_of(cfs_rq)->clock - se->throttle_start)); + schedstat_set(se->throttle_count, se->throttle_count + 1); + schedstat_set(se->throttle_sum, se->throttle_sum + + rq_of(cfs_rq)->clock - se->throttle_start); + schedstat_set(se->throttle_start, 0); +} + +static inline +struct cfs_rq *sched_cfs_period_cfs_rq(struct cfs_bandwidth *cfs_b, int cpu) +{ + return container_of(cfs_b, struct task_group, + cfs_bandwidth)->cfs_rq[cpu]; +} + +static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq) +{ + return cfs_rq->cfs_throttled; +} + +#ifdef CONFIG_SMP +/* + * Ensure this RQ takes back all the runtime it lent to its neighbours. + */ +static void disable_runtime_cfs(struct rq *rq) +{ + struct root_domain *rd = rq->rd; + struct cfs_rq *cfs_rq; + + if (unlikely(!scheduler_running)) + return; + + for_each_leaf_cfs_rq(rq, cfs_rq) { + struct cfs_bandwidth *cfs_b = &cfs_rq->tg->cfs_bandwidth; + s64 want; + int i; + + spin_lock(&cfs_b->cfs_runtime_lock); + spin_lock(&cfs_rq->cfs_runtime_lock); + + /* + * Either we're all infinite and nobody needs to borrow, + * or we're already disabled and thus have nothing to do, or + * we have exactly the right amount of runtime to take out. + */ + if (cfs_rq->cfs_runtime == RUNTIME_INF || + cfs_rq->cfs_runtime == cfs_b->cfs_runtime) + goto balanced; + spin_unlock(&cfs_rq->cfs_runtime_lock); + + /* + * Calculate the difference between what we started out with + * and what we currently have; that's the amount of runtime + * we lent and now have to reclaim. + */ + want = cfs_b->cfs_runtime - cfs_rq->cfs_runtime; + + /* + * Greedy reclaim, take back as much as possible. + */ + for_each_cpu(i, rd->span) { + struct cfs_rq *iter = sched_cfs_period_cfs_rq(cfs_b, i); + s64 diff; + + /* + * Can't reclaim from ourselves or disabled runqueues. + */ + if (iter == cfs_rq || iter->cfs_runtime == RUNTIME_INF) + continue; + + spin_lock(&iter->cfs_runtime_lock); + if (want > 0) { + diff = min_t(s64, iter->cfs_runtime, want); + iter->cfs_runtime -= diff; + want -= diff; + } else { + iter->cfs_runtime -= want; + want -= want; + } + + spin_unlock(&iter->cfs_runtime_lock); + if (!want) + break; + } + + spin_lock(&cfs_rq->cfs_runtime_lock); + /* + * We cannot be left wanting - that would mean some + * runtime leaked out of the system. + */ + BUG_ON(want); +balanced: + /* + * Disable all the borrow logic by pretending we have infinite + * runtime - in which case borrowing doesn't make sense.
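+ * + * Editor's note: this mirrors the rt side - the same reclaim logic + * lives in disable_runtime_rt() in sched_rt.c (renamed by this patch + * from __disable_runtime()).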
+ */ + cfs_rq->cfs_runtime = RUNTIME_INF; + spin_unlock(&cfs_rq->cfs_runtime_lock); + spin_unlock(&cfs_b->cfs_runtime_lock); + } +} + +static void enable_runtime_cfs(struct rq *rq) +{ + struct cfs_rq *cfs_rq; + + if (unlikely(!scheduler_running)) + return; + + /* + * Reset each runqueue's bandwidth settings + */ + for_each_leaf_cfs_rq(rq, cfs_rq) { + struct cfs_bandwidth *cfs_b = &cfs_rq->tg->cfs_bandwidth; + + spin_lock(&cfs_b->cfs_runtime_lock); + spin_lock(&cfs_rq->cfs_runtime_lock); + cfs_rq->cfs_runtime = cfs_b->cfs_runtime; + cfs_rq->cfs_time = 0; + cfs_rq->cfs_throttled = 0; + spin_unlock(&cfs_rq->cfs_runtime_lock); + spin_unlock(&cfs_b->cfs_runtime_lock); + } +} + +/* + * Ran out of runtime; check if we can borrow some from others + * instead of getting throttled right away. + */ +static void do_cfs_balance_runtime(struct cfs_rq *cfs_rq) +{ + struct cfs_bandwidth *cfs_b = &cfs_rq->tg->cfs_bandwidth; + const struct cpumask *span = sched_bw_period_mask(); + int i, weight; + u64 cfs_period; + + weight = cpumask_weight(span); + spin_lock(&cfs_b->cfs_runtime_lock); + cfs_period = ktime_to_ns(cfs_b->cfs_period); + + for_each_cpu(i, span) { + struct cfs_rq *borrow_cfs_rq = + sched_cfs_period_cfs_rq(cfs_b, i); + s64 diff; + + if (borrow_cfs_rq == cfs_rq) + continue; + + cfs_rq_runtime_lock(borrow_cfs_rq); + if (borrow_cfs_rq->cfs_runtime == RUNTIME_INF) { + cfs_rq_runtime_unlock(borrow_cfs_rq); + continue; + } + + diff = borrow_cfs_rq->cfs_runtime - borrow_cfs_rq->cfs_time; + if (diff > 0) { + diff = div_u64((u64)diff, weight); + if (cfs_rq->cfs_runtime + diff > cfs_period) + diff = cfs_period - cfs_rq->cfs_runtime; + borrow_cfs_rq->cfs_runtime -= diff; + cfs_rq->cfs_runtime += diff; + if (cfs_rq->cfs_runtime == cfs_period) { + cfs_rq_runtime_unlock(borrow_cfs_rq); + break; + } + } + cfs_rq_runtime_unlock(borrow_cfs_rq); + } + spin_unlock(&cfs_b->cfs_runtime_lock); +} + +/* + * Called with cfs_rq->cfs_runtime_lock held. + */ +static void cfs_balance_runtime(struct cfs_rq *cfs_rq) +{ + cfs_rq_runtime_unlock(cfs_rq); + do_cfs_balance_runtime(cfs_rq); + cfs_rq_runtime_lock(cfs_rq); +} + +#else /* !CONFIG_SMP */ + +static void cfs_balance_runtime(struct cfs_rq *cfs_rq) +{ + return; +} +#endif /* CONFIG_SMP */ + +/* + * Check if the group entity exceeded its runtime. If so, mark the cfs_rq as + * throttled and mark the current task for rescheduling.
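+ * + * Editor's note: cfs_time and cfs_runtime are per-cfs_rq, i.e. per cpu: + * each cpu's instance of the group consumes its local slice of the + * period's runtime, topped up by cfs_balance_runtime() with slack + * borrowed from sibling cpus.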
+ */ +static void sched_cfs_runtime_exceeded(struct sched_entity *se, + struct task_struct *tsk_curr, unsigned long delta_exec) +{ + struct cfs_rq *cfs_rq; + + cfs_rq = group_cfs_rq(se); + + if (cfs_rq->cfs_runtime == RUNTIME_INF) + return; + + cfs_rq->cfs_time += delta_exec; + + if (cfs_rq_throttled(cfs_rq)) + return; + + if (cfs_rq->cfs_time > cfs_rq->cfs_runtime) + cfs_balance_runtime(cfs_rq); + + if (cfs_rq->cfs_time > cfs_rq->cfs_runtime) { + cfs_rq->cfs_throttled = 1; + update_stats_throttle_start(cfs_rq, se); + resched_task(tsk_curr); + } +} + +static inline void update_curr_group(struct sched_entity *curr, + unsigned long delta_exec, struct task_struct *tsk_curr) +{ + sched_cfs_runtime_exceeded(curr, tsk_curr, delta_exec); +} + +static void enqueue_entity_locked(struct cfs_rq *cfs_rq, + struct sched_entity *se, int flags); + +static void enqueue_throttled_entity(struct rq *rq, struct sched_entity *se) +{ + for_each_sched_entity(se) { + struct cfs_rq *gcfs_rq = group_cfs_rq(se); + + if (se->on_rq || cfs_rq_throttled(gcfs_rq) || + !gcfs_rq->nr_running) + break; + enqueue_entity_locked(cfs_rq_of(se), se, 0); + } +} + +/* + * Refresh runtimes of all cfs_rqs in this group, i.e., + * refresh runtimes of the representative cfs_rq of this + * tg on all cpus. Enqueue any throttled entity back. + */ +void do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b) +{ + int i; + const struct cpumask *span = sched_bw_period_mask(); + unsigned long flags; + + for_each_cpu(i, span) { + struct rq *rq = cpu_rq(i); + struct cfs_rq *cfs_rq = sched_cfs_period_cfs_rq(cfs_b, i); + struct sched_entity *se = cfs_rq->tg->se[i]; + + spin_lock_irqsave(&rq->lock, flags); + cfs_rq_runtime_lock(cfs_rq); + cfs_rq->cfs_time = 0; + if (cfs_rq_throttled(cfs_rq)) { + update_rq_clock(rq); + update_stats_throttle_end(cfs_rq, se); + cfs_rq->cfs_throttled = 0; + enqueue_throttled_entity(rq, se); + } + cfs_rq_runtime_unlock(cfs_rq); + spin_unlock_irqrestore(&rq->lock, flags); + } +} + +#else + +static inline void update_curr_group(struct sched_entity *curr, + unsigned long delta_exec, struct task_struct *tsk_curr) +{ + return; +} + +static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq) +{ + return 0; +} + +#endif /* CONFIG_CFS_HARD_LIMITS */ + +#else /* CONFIG_FAIR_GROUP_SCHED */ + +static inline void update_curr_group(struct sched_entity *curr, + unsigned long delta_exec, struct task_struct *tsk_curr) +{ + return; +} static inline struct task_struct *task_of(struct sched_entity *se) { @@ -254,7 +555,6 @@ #endif /* CONFIG_FAIR_GROUP_SCHED */ - /************************************************************** * Scheduling class tree data structure manipulation methods: */ @@ -493,14 +793,25 @@ update_min_vruntime(cfs_rq); } -static void update_curr(struct cfs_rq *cfs_rq) +static void update_curr_task(struct sched_entity *curr, + unsigned long delta_exec) +{ + struct task_struct *curtask = task_of(curr); + + trace_sched_stat_runtime(curtask, delta_exec, curr->vruntime); + cpuacct_charge(curtask, delta_exec); + account_group_exec_runtime(curtask, delta_exec); +} + +static int update_curr_common(struct cfs_rq *cfs_rq, unsigned long *delta) { struct sched_entity *curr = cfs_rq->curr; - u64 now = rq_of(cfs_rq)->clock_task; + struct rq *rq = rq_of(cfs_rq); + u64 now = rq->clock_task; unsigned long delta_exec; if (unlikely(!curr)) - return; + return 1; /* * Get the amount of time the current task was running @@ -509,17 +820,29 @@ */ delta_exec = (unsigned long)(now - curr->exec_start); if (!delta_exec) - return; + return 1;
__update_curr(cfs_rq, curr, delta_exec); curr->exec_start = now; + *delta = delta_exec; + return 0; +} - if (entity_is_task(curr)) { - struct task_struct *curtask = task_of(curr); +static void update_curr(struct cfs_rq *cfs_rq) +{ + struct sched_entity *curr = cfs_rq->curr; + struct rq *rq = rq_of(cfs_rq); + unsigned long delta_exec; + + if (update_curr_common(cfs_rq, &delta_exec)) + return; - trace_sched_stat_runtime(curtask, delta_exec, curr->vruntime); - cpuacct_charge(curtask, delta_exec); - account_group_exec_runtime(curtask, delta_exec); + if (entity_is_task(curr)) + update_curr_task(curr, delta_exec); + else { + cfs_rq_runtime_lock(group_cfs_rq(curr)); + update_curr_group(curr, delta_exec, rq->curr); + cfs_rq_runtime_unlock(group_cfs_rq(curr)); } } @@ -748,6 +1071,25 @@ #define ENQUEUE_MIGRATE 2 static void +enqueue_entity_common(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) +{ + account_entity_enqueue(cfs_rq, se); + + if (flags & ENQUEUE_WAKEUP) { + place_entity(cfs_rq, se, 0); + enqueue_sleeper(cfs_rq, se); + } + + update_stats_enqueue(cfs_rq, se); + check_spread(cfs_rq, se); + if (se != cfs_rq->curr) + __enqueue_entity(cfs_rq, se); + + if (entity_is_task(se)) + vx_activate_task(task_of(se)); +} + +static void enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) { /* @@ -761,17 +1103,17 @@ * Update run-time statistics of the 'current'. */ update_curr(cfs_rq); - account_entity_enqueue(cfs_rq, se); - - if (flags & ENQUEUE_WAKEUP) { - place_entity(cfs_rq, se, 0); - enqueue_sleeper(cfs_rq, se); - } + enqueue_entity_common(cfs_rq, se, flags); +} - update_stats_enqueue(cfs_rq, se); - check_spread(cfs_rq, se); - if (se != cfs_rq->curr) - __enqueue_entity(cfs_rq, se); +static void enqueue_entity_locked(struct cfs_rq *cfs_rq, + struct sched_entity *se, int flags) +{ + /* + * Update run-time statistics of the 'current'. + */ + // update_curr_locked(cfs_rq); + enqueue_entity_common(cfs_rq, se, flags); } static void __clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se) @@ -815,6 +1157,8 @@ if (se != cfs_rq->curr) __dequeue_entity(cfs_rq, se); + if (entity_is_task(se)) + vx_deactivate_task(task_of(se)); account_entity_dequeue(cfs_rq, se); update_min_vruntime(cfs_rq); @@ -919,6 +1263,32 @@ return se; } +/* + * Called from put_prev_entity(). + * If a group entity (@se) is found to be throttled, it will not be put back + * on @cfs_rq, which is equivalent to dequeuing it. + */ +static int dequeue_throttled_entity(struct cfs_rq *cfs_rq, + struct sched_entity *se) +{ + struct cfs_rq *gcfs_rq = group_cfs_rq(se); + + if (entity_is_task(se)) + return 0; + + cfs_rq_runtime_lock(gcfs_rq); + if (!cfs_rq_throttled(gcfs_rq) && gcfs_rq->nr_running) { + cfs_rq_runtime_unlock(gcfs_rq); + return 0; + } + + __clear_buddies(cfs_rq, se); + account_entity_dequeue(cfs_rq, se); + cfs_rq->curr = NULL; + cfs_rq_runtime_unlock(gcfs_rq); + return 1; +} + static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev) { /* @@ -930,6 +1300,8 @@ check_spread(cfs_rq, prev); if (prev->on_rq) { + if (dequeue_throttled_entity(cfs_rq, prev)) + return; update_stats_wait_start(cfs_rq, prev); /* Put 'current' back into the tree.
*/ __enqueue_entity(cfs_rq, prev); @@ -1026,10 +1398,28 @@ } #endif +static int enqueue_group_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, + int flags) +{ + struct cfs_rq *gcfs_rq = group_cfs_rq(se); + int ret = 0; + + cfs_rq_runtime_lock(gcfs_rq); + if (cfs_rq_throttled(gcfs_rq)) { + ret = 1; + goto out; + } + enqueue_entity_locked(cfs_rq, se, flags); +out: + cfs_rq_runtime_unlock(gcfs_rq); + return ret; +} + /* * The enqueue_task method is called before nr_running is * increased. Here we update the fair scheduling stats and * then put the task into the rbtree: + * Don't enqueue a throttled entity further into the hierarchy. */ static void enqueue_task_fair(struct rq *rq, struct task_struct *p, int wakeup, bool head) @@ -1046,11 +1436,15 @@ for_each_sched_entity(se) { if (se->on_rq) break; + cfs_rq = cfs_rq_of(se); - enqueue_entity(cfs_rq, se, flags); + if (entity_is_task(se)) + enqueue_entity(cfs_rq, se, flags); + else + if (enqueue_group_entity(cfs_rq, se, flags)) + break; flags = ENQUEUE_WAKEUP; } - hrtick_update(rq); } @@ -1070,6 +1464,17 @@ /* Don't dequeue parent if it has other entities besides us */ if (cfs_rq->load.weight) break; + + /* + * If this cfs_rq is throttled, then it is already + * dequeued. + */ + cfs_rq_runtime_lock(cfs_rq); + if (cfs_rq_throttled(cfs_rq)) { + cfs_rq_runtime_unlock(cfs_rq); + break; + } + cfs_rq_runtime_unlock(cfs_rq); sleep = 1; } @@ -1896,9 +2301,10 @@ u64 rem_load, moved_load; /* - * empty group + * empty group or throttled group */ - if (!busiest_cfs_rq->task_weight) + if (!busiest_cfs_rq->task_weight || + cfs_rq_throttled(busiest_cfs_rq)) continue; rem_load = (u64)rem_load_move * busiest_weight; @@ -1947,6 +2353,12 @@ for_each_leaf_cfs_rq(busiest, busy_cfs_rq) { /* + * Don't move task from a throttled cfs_rq + */ + if (cfs_rq_throttled(busy_cfs_rq)) + continue; + + /* * pass busy_cfs_rq argument into * load_balance_[start|next]_fair iterators */ diff -Nur kernel-2.6.32.54/linux-2.6.32/kernel/sched_rt.c kernel-2.6.32.54.vs/linux-2.6.32/kernel/sched_rt.c --- kernel-2.6.32.54/linux-2.6.32/kernel/sched_rt.c 2012-01-16 15:01:39.928725377 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/kernel/sched_rt.c 2012-01-16 14:51:22.025408323 +0100 @@ -235,18 +235,6 @@ return p->prio != p->normal_prio; } -#ifdef CONFIG_SMP -static inline const struct cpumask *sched_rt_period_mask(void) -{ - return cpu_rq(smp_processor_id())->rd->span; -} -#else -static inline const struct cpumask *sched_rt_period_mask(void) -{ - return cpu_online_mask; -} -#endif - static inline struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu) { @@ -296,11 +284,6 @@ return rt_rq->rt_throttled; } -static inline const struct cpumask *sched_rt_period_mask(void) -{ - return cpu_online_mask; -} - static inline struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu) { @@ -373,7 +356,7 @@ /* * Ensure this RQ takes back all the runtime it lend to its neighbours. 
*/ -static void __disable_runtime(struct rq *rq) +static void disable_runtime_rt(struct rq *rq) { struct root_domain *rd = rq->rd; struct rt_rq *rt_rq; @@ -450,16 +433,7 @@ } } -static void disable_runtime(struct rq *rq) -{ - unsigned long flags; - - spin_lock_irqsave(&rq->lock, flags); - __disable_runtime(rq); - spin_unlock_irqrestore(&rq->lock, flags); -} - -static void __enable_runtime(struct rq *rq) +static void enable_runtime_rt(struct rq *rq) { struct rt_rq *rt_rq; @@ -482,15 +456,6 @@ } } -static void enable_runtime(struct rq *rq) -{ - unsigned long flags; - - spin_lock_irqsave(&rq->lock, flags); - __enable_runtime(rq); - spin_unlock_irqrestore(&rq->lock, flags); -} - static int balance_runtime(struct rt_rq *rt_rq) { int more = 0; @@ -518,7 +483,7 @@ if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF) return 1; - span = sched_rt_period_mask(); + span = sched_bw_period_mask(); for_each_cpu(i, span) { int enqueue = 0; struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i); @@ -1571,7 +1536,7 @@ if (rq->rt.overloaded) rt_set_overload(rq); - __enable_runtime(rq); + enable_runtime_rt(rq); cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio.curr); } @@ -1582,7 +1547,7 @@ if (rq->rt.overloaded) rt_clear_overload(rq); - __disable_runtime(rq); + disable_runtime_rt(rq); cpupri_set(&rq->rd->cpupri, rq->cpu, CPUPRI_INVALID); } diff -Nur kernel-2.6.32.54/linux-2.6.32/kernel/signal.c kernel-2.6.32.54.vs/linux-2.6.32/kernel/signal.c --- kernel-2.6.32.54/linux-2.6.32/kernel/signal.c 2012-01-16 15:01:39.928725377 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/kernel/signal.c 2012-01-16 14:51:22.025408323 +0100 @@ -27,6 +27,8 @@ #include #include #include +#include +#include #include #include @@ -598,6 +600,14 @@ if (!valid_signal(sig)) return -EINVAL; + if ((info != SEND_SIG_NOINFO) && + (is_si_special(info) || !SI_FROMUSER(info))) + goto skip; + + vxdprintk(VXD_CBIT(misc, 7), + "check_kill_permission(%d,%p,%p[#%u,%u])", + sig, info, t, vx_task_xid(t), t->pid); + if (info != SEND_SIG_NOINFO && (is_si_special(info) || SI_FROMKERNEL(info))) return 0; @@ -627,6 +637,20 @@ } } + error = -EPERM; + if (t->pid == 1 && current->xid) + return error; + + error = -ESRCH; + /* FIXME: we shouldn't return ESRCH ever, to avoid + loops, maybe ENOENT or EACCES? 
*/ + if (!vx_check(vx_task_xid(t), VS_WATCH_P | VS_IDENT)) { + vxdprintk(current->xid || VXD_CBIT(misc, 7), + "signal %d[%p] xid mismatch %p[#%u,%u] xid=#%u", + sig, info, t, vx_task_xid(t), t->pid, current->xid); + return error; + } +skip: return security_task_kill(t, info, sig, 0); } @@ -1115,7 +1139,7 @@ rcu_read_lock(); retry: p = pid_task(pid, PIDTYPE_PID); - if (p) { + if (p && vx_check(vx_task_xid(p), VS_IDENT)) { error = group_send_sig_info(sig, info, p); if (unlikely(error == -ESRCH)) /* @@ -1154,7 +1178,7 @@ read_lock(&tasklist_lock); p = pid_task(pid, PIDTYPE_PID); - if (!p) { + if (!p || !vx_check(vx_task_xid(p), VS_IDENT)) { ret = -ESRCH; goto out_unlock; } @@ -1208,8 +1232,10 @@ struct task_struct * p; for_each_process(p) { - if (task_pid_vnr(p) > 1 && - !same_thread_group(p, current)) { + if (vx_check(vx_task_xid(p), VS_ADMIN|VS_IDENT) && + task_pid_vnr(p) > 1 && + !same_thread_group(p, current) && + !vx_current_initpid(p->pid)) { int err = group_send_sig_info(sig, info, p); ++count; if (err != -EPERM) @@ -1874,6 +1900,11 @@ !sig_kernel_only(signr)) continue; + /* virtual init is protected against user signals */ + if ((info->si_code == SI_USER) && + vx_current_initpid(current->pid)) + continue; + if (sig_kernel_stop(signr)) { /* * The default action is to stop all threads in diff -Nur kernel-2.6.32.54/linux-2.6.32/kernel/softirq.c kernel-2.6.32.54.vs/linux-2.6.32/kernel/softirq.c --- kernel-2.6.32.54/linux-2.6.32/kernel/softirq.c 2012-01-16 15:01:39.932725363 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/kernel/softirq.c 2012-01-16 14:51:22.025408323 +0100 @@ -24,6 +24,7 @@ #include #include #include +#include #define CREATE_TRACE_POINTS #include diff -Nur kernel-2.6.32.54/linux-2.6.32/kernel/sys.c kernel-2.6.32.54.vs/linux-2.6.32/kernel/sys.c --- kernel-2.6.32.54/linux-2.6.32/kernel/sys.c 2012-01-16 15:01:39.932725363 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/kernel/sys.c 2012-01-16 14:51:22.025408323 +0100 @@ -41,6 +41,7 @@ #include #include #include +#include #include #include @@ -130,7 +131,10 @@ goto out; } if (niceval < task_nice(p) && !can_nice(p, niceval)) { - error = -EACCES; + if (vx_flags(VXF_IGNEG_NICE, 0)) + error = 0; + else + error = -EACCES; goto out; } no_nice = security_task_setnice(p, niceval); @@ -179,6 +183,8 @@ else pgrp = task_pgrp(current); do_each_pid_thread(pgrp, PIDTYPE_PGID, p) { + if (!vx_check(p->xid, VS_ADMIN_P | VS_IDENT)) + continue; error = set_one_prio(p, niceval, error); } while_each_pid_thread(pgrp, PIDTYPE_PGID, p); break; @@ -240,6 +246,8 @@ else pgrp = task_pgrp(current); do_each_pid_thread(pgrp, PIDTYPE_PGID, p) { + if (!vx_check(p->xid, VS_ADMIN_P | VS_IDENT)) + continue; niceval = 20 - task_nice(p); if (niceval > retval) retval = niceval; @@ -349,6 +357,9 @@ machine_power_off(); } EXPORT_SYMBOL_GPL(kernel_power_off); + +long vs_reboot(unsigned int, void __user *); + /* * Reboot system call: for obvious reasons only root may call it, * and even root needs to set up some magic numbers in the registers @@ -381,6 +392,9 @@ if ((cmd == LINUX_REBOOT_CMD_POWER_OFF) && !pm_power_off) cmd = LINUX_REBOOT_CMD_HALT; + if (!vx_check(0, VS_ADMIN|VS_WATCH)) + return vs_reboot(cmd, arg); + lock_kernel(); switch (cmd) { case LINUX_REBOOT_CMD_RESTART: @@ -1129,7 +1143,7 @@ int errno; char tmp[__NEW_UTS_LEN]; - if (!capable(CAP_SYS_ADMIN)) + if (!vx_capable(CAP_SYS_ADMIN, VXC_SET_UTSNAME)) return -EPERM; if (len < 0 || len > __NEW_UTS_LEN) return -EINVAL; @@ -1178,7 +1192,7 @@ int errno; char tmp[__NEW_UTS_LEN]; - if (!capable(CAP_SYS_ADMIN)) + if 
(!vx_capable(CAP_SYS_ADMIN, VXC_SET_UTSNAME)) return -EPERM; if (len < 0 || len > __NEW_UTS_LEN) return -EINVAL; @@ -1247,7 +1261,7 @@ return -EINVAL; old_rlim = current->signal->rlim + resource; if ((new_rlim.rlim_max > old_rlim->rlim_max) && - !capable(CAP_SYS_RESOURCE)) + !vx_capable(CAP_SYS_RESOURCE, VXC_SET_RLIMIT)) return -EPERM; if (resource == RLIMIT_NOFILE && new_rlim.rlim_max > sysctl_nr_open) return -EPERM; diff -Nur kernel-2.6.32.54/linux-2.6.32/kernel/sysctl.c kernel-2.6.32.54.vs/linux-2.6.32/kernel/sysctl.c --- kernel-2.6.32.54/linux-2.6.32/kernel/sysctl.c 2012-01-16 15:01:39.932725363 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/kernel/sysctl.c 2012-01-16 14:51:22.029408309 +0100 @@ -124,6 +124,7 @@ extern char modprobe_path[]; extern int modules_disabled; #endif +extern char vshelper_path[]; #ifdef CONFIG_CHR_DEV_SG extern int sg_big_buff; #endif @@ -593,6 +594,15 @@ .strategy = &sysctl_string, }, #endif + { + .ctl_name = KERN_VSHELPER, + .procname = "vshelper", + .data = &vshelper_path, + .maxlen = 256, + .mode = 0644, + .proc_handler = &proc_dostring, + .strategy = &sysctl_string, + }, #ifdef CONFIG_CHR_DEV_SG { .ctl_name = KERN_SG_BIG_BUFF, diff -Nur kernel-2.6.32.54/linux-2.6.32/kernel/sysctl_check.c kernel-2.6.32.54.vs/linux-2.6.32/kernel/sysctl_check.c --- kernel-2.6.32.54/linux-2.6.32/kernel/sysctl_check.c 2012-01-16 15:01:39.936725349 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/kernel/sysctl_check.c 2012-01-16 14:51:22.033408295 +0100 @@ -39,6 +39,7 @@ { KERN_PANIC, "panic" }, { KERN_REALROOTDEV, "real-root-dev" }, + { KERN_VSHELPER, "vshelper", }, { KERN_SPARC_REBOOT, "reboot-cmd" }, { KERN_CTLALTDEL, "ctrl-alt-del" }, @@ -1218,6 +1219,22 @@ {} }; +static struct trans_ctl_table trans_vserver_table[] = { + { 1, "debug_switch" }, + { 2, "debug_xid" }, + { 3, "debug_nid" }, + { 4, "debug_tag" }, + { 5, "debug_net" }, + { 6, "debug_limit" }, + { 7, "debug_cres" }, + { 8, "debug_dlim" }, + { 9, "debug_quota" }, + { 10, "debug_cvirt" }, + { 11, "debug_space" }, + { 12, "debug_misc" }, + {} +}; + static const struct trans_ctl_table trans_root_table[] = { { CTL_KERN, "kernel", trans_kern_table }, { CTL_VM, "vm", trans_vm_table }, @@ -1234,6 +1251,7 @@ { CTL_SUNRPC, "sunrpc", trans_sunrpc_table }, { CTL_PM, "pm", trans_pm_table }, { CTL_FRV, "frv", trans_frv_table }, + { CTL_VSERVER, "vserver", trans_vserver_table }, {} }; diff -Nur kernel-2.6.32.54/linux-2.6.32/kernel/time.c kernel-2.6.32.54.vs/linux-2.6.32/kernel/time.c --- kernel-2.6.32.54/linux-2.6.32/kernel/time.c 2012-01-16 15:01:39.936725349 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/kernel/time.c 2012-01-16 14:51:22.033408295 +0100 @@ -63,6 +63,7 @@ SYSCALL_DEFINE1(time, time_t __user *, tloc) { time_t i = get_seconds(); +/* FIXME: do_gettimeofday(&tv) -> vx_gettimeofday(&tv) */ if (tloc) { if (put_user(i,tloc)) @@ -93,7 +94,7 @@ if (err) return err; - do_settimeofday(&tv); + vx_settimeofday(&tv); return 0; } @@ -104,7 +105,7 @@ { if (likely(tv != NULL)) { struct timeval ktv; - do_gettimeofday(&ktv); + vx_gettimeofday(&ktv); if (copy_to_user(tv, &ktv, sizeof(ktv))) return -EFAULT; } @@ -179,7 +180,7 @@ /* SMP safe, again the code in arch/foo/time.c should * globally block out interrupts when it runs. 
*/ - return do_settimeofday(tv); + return vx_settimeofday(tv); } return 0; } @@ -311,7 +312,7 @@ { struct timeval x; - do_gettimeofday(&x); + vx_gettimeofday(&x); tv->tv_sec = x.tv_sec; tv->tv_nsec = x.tv_usec * NSEC_PER_USEC; } diff -Nur kernel-2.6.32.54/linux-2.6.32/kernel/timer.c kernel-2.6.32.54.vs/linux-2.6.32/kernel/timer.c --- kernel-2.6.32.54/linux-2.6.32/kernel/timer.c 2012-01-16 15:01:39.940725335 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/kernel/timer.c 2012-01-16 14:51:22.033408295 +0100 @@ -39,6 +39,10 @@ #include #include #include +#include +#include +#include +#include #include #include @@ -1261,12 +1265,6 @@ #endif -#ifndef __alpha__ - -/* - * The Alpha uses getxpid, getxuid, and getxgid instead. Maybe this - * should be moved into arch/i386 instead? - */ /** * sys_getpid - return the thread group id of the current process @@ -1295,10 +1293,23 @@ rcu_read_lock(); pid = task_tgid_vnr(current->real_parent); rcu_read_unlock(); + return vx_map_pid(pid); +} + +#ifdef __alpha__ - return pid; +/* + * The Alpha uses getxpid, getxuid, and getxgid instead. + */ + +asmlinkage long do_getxpid(long *ppid) +{ + *ppid = sys_getppid(); + return sys_getpid(); } +#else /* _alpha_ */ + SYSCALL_DEFINE0(getuid) { /* Only we change this so SMP safe */ diff -Nur kernel-2.6.32.54/linux-2.6.32/kernel/user_namespace.c kernel-2.6.32.54.vs/linux-2.6.32/kernel/user_namespace.c --- kernel-2.6.32.54/linux-2.6.32/kernel/user_namespace.c 2009-12-03 04:51:21.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/kernel/user_namespace.c 2012-01-16 14:51:22.033408295 +0100 @@ -10,6 +10,7 @@ #include #include #include +#include /* * Create a new user namespace, deriving the creator from the user in the @@ -30,6 +31,7 @@ return -ENOMEM; kref_init(&ns->kref); + atomic_inc(&vs_global_user_ns); for (n = 0; n < UIDHASH_SZ; ++n) INIT_HLIST_HEAD(ns->uidhash_table + n); @@ -78,6 +80,8 @@ struct user_namespace *ns = container_of(kref, struct user_namespace, kref); + /* FIXME: maybe move into destroyer? 
*/ + atomic_dec(&vs_global_user_ns); INIT_WORK(&ns->destroyer, free_user_ns_work); schedule_work(&ns->destroyer); } diff -Nur kernel-2.6.32.54/linux-2.6.32/kernel/utsname.c kernel-2.6.32.54.vs/linux-2.6.32/kernel/utsname.c --- kernel-2.6.32.54/linux-2.6.32/kernel/utsname.c 2009-12-03 04:51:21.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/kernel/utsname.c 2012-01-16 14:51:22.033408295 +0100 @@ -14,14 +14,17 @@ #include #include #include +#include static struct uts_namespace *create_uts_ns(void) { struct uts_namespace *uts_ns; uts_ns = kmalloc(sizeof(struct uts_namespace), GFP_KERNEL); - if (uts_ns) + if (uts_ns) { kref_init(&uts_ns->kref); + atomic_inc(&vs_global_uts_ns); + } return uts_ns; } @@ -71,5 +74,6 @@ struct uts_namespace *ns; ns = container_of(kref, struct uts_namespace, kref); + atomic_dec(&vs_global_uts_ns); kfree(ns); } diff -Nur kernel-2.6.32.54/linux-2.6.32/kernel/vserver/cacct.c kernel-2.6.32.54.vs/linux-2.6.32/kernel/vserver/cacct.c --- kernel-2.6.32.54/linux-2.6.32/kernel/vserver/cacct.c 1970-01-01 01:00:00.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/kernel/vserver/cacct.c 2012-01-16 14:51:22.033408295 +0100 @@ -0,0 +1,42 @@ +/* + * linux/kernel/vserver/cacct.c + * + * Virtual Server: Context Accounting + * + * Copyright (C) 2006-2007 Herbert Pötzl + * + * V0.01 added accounting stats + * + */ + +#include +#include +#include +#include + +#include +#include + + +int vc_sock_stat(struct vx_info *vxi, void __user *data) +{ + struct vcmd_sock_stat_v0 vc_data; + int j, field; + + if (copy_from_user(&vc_data, data, sizeof(vc_data))) + return -EFAULT; + + field = vc_data.field; + if ((field < 0) || (field >= VXA_SOCK_SIZE)) + return -EINVAL; + + for (j = 0; j < 3; j++) { + vc_data.count[j] = vx_sock_count(&vxi->cacct, field, j); + vc_data.total[j] = vx_sock_total(&vxi->cacct, field, j); + } + + if (copy_to_user(data, &vc_data, sizeof(vc_data))) + return -EFAULT; + return 0; +} + diff -Nur kernel-2.6.32.54/linux-2.6.32/kernel/vserver/cacct_init.h kernel-2.6.32.54.vs/linux-2.6.32/kernel/vserver/cacct_init.h --- kernel-2.6.32.54/linux-2.6.32/kernel/vserver/cacct_init.h 1970-01-01 01:00:00.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/kernel/vserver/cacct_init.h 2012-01-16 14:51:22.033408295 +0100 @@ -0,0 +1,25 @@ + + +static inline void vx_info_init_cacct(struct _vx_cacct *cacct) +{ + int i, j; + + + for (i = 0; i < VXA_SOCK_SIZE; i++) { + for (j = 0; j < 3; j++) { + atomic_long_set(&cacct->sock[i][j].count, 0); + atomic_long_set(&cacct->sock[i][j].total, 0); + } + } + for (i = 0; i < 8; i++) + atomic_set(&cacct->slab[i], 0); + for (i = 0; i < 5; i++) + for (j = 0; j < 4; j++) + atomic_set(&cacct->page[i][j], 0); +} + +static inline void vx_info_exit_cacct(struct _vx_cacct *cacct) +{ + return; +} + diff -Nur kernel-2.6.32.54/linux-2.6.32/kernel/vserver/cacct_proc.h kernel-2.6.32.54.vs/linux-2.6.32/kernel/vserver/cacct_proc.h --- kernel-2.6.32.54/linux-2.6.32/kernel/vserver/cacct_proc.h 1970-01-01 01:00:00.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/kernel/vserver/cacct_proc.h 2012-01-16 14:51:22.033408295 +0100 @@ -0,0 +1,53 @@ +#ifndef _VX_CACCT_PROC_H +#define _VX_CACCT_PROC_H + +#include + + +#define VX_SOCKA_TOP \ + "Type\t recv #/bytes\t\t send #/bytes\t\t fail #/bytes\n" + +static inline int vx_info_proc_cacct(struct _vx_cacct *cacct, char *buffer) +{ + int i, j, length = 0; + static char *type[VXA_SOCK_SIZE] = { + "UNSPEC", "UNIX", "INET", "INET6", "PACKET", "OTHER" + }; + + length += sprintf(buffer + length, VX_SOCKA_TOP); + for (i = 0; i < 
VXA_SOCK_SIZE; i++) { + length += sprintf(buffer + length, "%s:", type[i]); + for (j = 0; j < 3; j++) { + length += sprintf(buffer + length, + "\t%10lu/%-10lu", + vx_sock_count(cacct, i, j), + vx_sock_total(cacct, i, j)); + } + buffer[length++] = '\n'; + } + + length += sprintf(buffer + length, "\n"); + length += sprintf(buffer + length, + "slab:\t %8u %8u %8u %8u\n", + atomic_read(&cacct->slab[1]), + atomic_read(&cacct->slab[4]), + atomic_read(&cacct->slab[0]), + atomic_read(&cacct->slab[2])); + + length += sprintf(buffer + length, "\n"); + for (i = 0; i < 5; i++) { + length += sprintf(buffer + length, + "page[%d]: %8u %8u %8u %8u\t %8u %8u %8u %8u\n", i, + atomic_read(&cacct->page[i][0]), + atomic_read(&cacct->page[i][1]), + atomic_read(&cacct->page[i][2]), + atomic_read(&cacct->page[i][3]), + atomic_read(&cacct->page[i][4]), + atomic_read(&cacct->page[i][5]), + atomic_read(&cacct->page[i][6]), + atomic_read(&cacct->page[i][7])); + } + return length; +} + +#endif /* _VX_CACCT_PROC_H */ diff -Nur kernel-2.6.32.54/linux-2.6.32/kernel/vserver/context.c kernel-2.6.32.54.vs/linux-2.6.32/kernel/vserver/context.c --- kernel-2.6.32.54/linux-2.6.32/kernel/vserver/context.c 1970-01-01 01:00:00.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/kernel/vserver/context.c 2012-01-16 14:51:22.041408267 +0100 @@ -0,0 +1,1058 @@ +/* + * linux/kernel/vserver/context.c + * + * Virtual Server: Context Support + * + * Copyright (C) 2003-2007 Herbert Pötzl + * + * V0.01 context helper + * V0.02 vx_ctx_kill syscall command + * V0.03 replaced context_info calls + * V0.04 redesign of struct (de)alloc + * V0.05 rlimit basic implementation + * V0.06 task_xid and info commands + * V0.07 context flags and caps + * V0.08 switch to RCU based hash + * V0.09 revert to non RCU for now + * V0.10 and back to working RCU hash + * V0.11 and back to locking again + * V0.12 referenced context store + * V0.13 separate per cpu data + * V0.14 changed vcmds to vxi arg + * V0.15 added context stat + * V0.16 have __create claim() the vxi + * V0.17 removed older and legacy stuff + * + */ + +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "cvirt_init.h" +#include "cacct_init.h" +#include "limit_init.h" +#include "sched_init.h" + + +atomic_t vx_global_ctotal = ATOMIC_INIT(0); +atomic_t vx_global_cactive = ATOMIC_INIT(0); + + +/* now inactive context structures */ + +static struct hlist_head vx_info_inactive = HLIST_HEAD_INIT; + +static spinlock_t vx_info_inactive_lock = SPIN_LOCK_UNLOCKED; + + +/* __alloc_vx_info() + + * allocate an initialized vx_info struct + * doesn't make it visible (hash) */ + +static struct vx_info *__alloc_vx_info(xid_t xid) +{ + struct vx_info *new = NULL; + int cpu, index; + + vxdprintk(VXD_CBIT(xid, 0), "alloc_vx_info(%d)*", xid); + + /* would this benefit from a slab cache? 
*/ + new = kmalloc(sizeof(struct vx_info), GFP_KERNEL); + if (!new) + return 0; + + memset(new, 0, sizeof(struct vx_info)); +#ifdef CONFIG_SMP + new->ptr_pc = alloc_percpu(struct _vx_info_pc); + if (!new->ptr_pc) + goto error; +#endif + new->vx_id = xid; + INIT_HLIST_NODE(&new->vx_hlist); + atomic_set(&new->vx_usecnt, 0); + atomic_set(&new->vx_tasks, 0); + new->vx_parent = NULL; + new->vx_state = 0; + init_waitqueue_head(&new->vx_wait); + + /* prepare reaper */ + get_task_struct(init_pid_ns.child_reaper); + new->vx_reaper = init_pid_ns.child_reaper; + new->vx_badness_bias = 0; + + /* rest of init goes here */ + vx_info_init_limit(&new->limit); + vx_info_init_sched(&new->sched); + vx_info_init_cvirt(&new->cvirt); + vx_info_init_cacct(&new->cacct); + + /* per cpu data structures */ + for_each_possible_cpu(cpu) { + vx_info_init_sched_pc( + &vx_per_cpu(new, sched_pc, cpu), cpu); + vx_info_init_cvirt_pc( + &vx_per_cpu(new, cvirt_pc, cpu), cpu); + } + + new->vx_flags = VXF_INIT_SET; + cap_set_init_eff(new->vx_bcaps); + new->vx_ccaps = 0; + new->vx_umask = 0; + + new->reboot_cmd = 0; + new->exit_code = 0; + + // preconfig fs entries + for (index = 0; index < VX_SPACES; index++) { + write_lock(&init_fs.lock); + init_fs.users++; + write_unlock(&init_fs.lock); + new->vx_fs[index] = &init_fs; + } + + vxdprintk(VXD_CBIT(xid, 0), + "alloc_vx_info(%d) = %p", xid, new); + vxh_alloc_vx_info(new); + atomic_inc(&vx_global_ctotal); + return new; +#ifdef CONFIG_SMP +error: + kfree(new); + return 0; +#endif +} + +/* __dealloc_vx_info() + + * final disposal of vx_info */ + +static void __dealloc_vx_info(struct vx_info *vxi) +{ +#ifdef CONFIG_VSERVER_WARN + struct vx_info_save vxis; + int cpu; +#endif + vxdprintk(VXD_CBIT(xid, 0), + "dealloc_vx_info(%p)", vxi); + vxh_dealloc_vx_info(vxi); + +#ifdef CONFIG_VSERVER_WARN + enter_vx_info(vxi, &vxis); + vx_info_exit_limit(&vxi->limit); + vx_info_exit_sched(&vxi->sched); + vx_info_exit_cvirt(&vxi->cvirt); + vx_info_exit_cacct(&vxi->cacct); + + for_each_possible_cpu(cpu) { + vx_info_exit_sched_pc( + &vx_per_cpu(vxi, sched_pc, cpu), cpu); + vx_info_exit_cvirt_pc( + &vx_per_cpu(vxi, cvirt_pc, cpu), cpu); + } + leave_vx_info(&vxis); +#endif + + vxi->vx_id = -1; + vxi->vx_state |= VXS_RELEASED; + +#ifdef CONFIG_SMP + free_percpu(vxi->ptr_pc); +#endif + kfree(vxi); + atomic_dec(&vx_global_ctotal); +} + +static void __shutdown_vx_info(struct vx_info *vxi) +{ + struct nsproxy *nsproxy; + struct fs_struct *fs; + int index, kill; + + might_sleep(); + + vxi->vx_state |= VXS_SHUTDOWN; + vs_state_change(vxi, VSC_SHUTDOWN); + + for (index = 0; index < VX_SPACES; index++) { + nsproxy = xchg(&vxi->vx_nsproxy[index], NULL); + if (nsproxy) + put_nsproxy(nsproxy); + + fs = xchg(&vxi->vx_fs[index], NULL); + write_lock(&fs->lock); + kill = !--fs->users; + write_unlock(&fs->lock); + if (kill) + free_fs_struct(fs); + } +} + +/* exported stuff */ + +void free_vx_info(struct vx_info *vxi) +{ + unsigned long flags; + unsigned index; + + /* check for reference counts first */ + BUG_ON(atomic_read(&vxi->vx_usecnt)); + BUG_ON(atomic_read(&vxi->vx_tasks)); + + /* context must not be hashed */ + BUG_ON(vx_info_state(vxi, VXS_HASHED)); + + /* context shutdown is mandatory */ + BUG_ON(!vx_info_state(vxi, VXS_SHUTDOWN)); + + /* nsproxy and fs check */ + for (index = 0; index < VX_SPACES; index++) { + BUG_ON(vxi->vx_nsproxy[index]); + BUG_ON(vxi->vx_fs[index]); + } + + spin_lock_irqsave(&vx_info_inactive_lock, flags); + hlist_del(&vxi->vx_hlist); + spin_unlock_irqrestore(&vx_info_inactive_lock, flags); 
+ + __dealloc_vx_info(vxi); +} + + +/* hash table for vx_info hash */ + +#define VX_HASH_SIZE 13 + +static struct hlist_head vx_info_hash[VX_HASH_SIZE] = + { [0 ... VX_HASH_SIZE-1] = HLIST_HEAD_INIT }; + +static spinlock_t vx_info_hash_lock = SPIN_LOCK_UNLOCKED; + + +static inline unsigned int __hashval(xid_t xid) +{ + return (xid % VX_HASH_SIZE); +} + + + +/* __hash_vx_info() + + * add the vxi to the global hash table + * requires the hash_lock to be held */ + +static inline void __hash_vx_info(struct vx_info *vxi) +{ + struct hlist_head *head; + + vxd_assert_lock(&vx_info_hash_lock); + vxdprintk(VXD_CBIT(xid, 4), + "__hash_vx_info: %p[#%d]", vxi, vxi->vx_id); + vxh_hash_vx_info(vxi); + + /* context must not be hashed */ + BUG_ON(vx_info_state(vxi, VXS_HASHED)); + + vxi->vx_state |= VXS_HASHED; + head = &vx_info_hash[__hashval(vxi->vx_id)]; + hlist_add_head(&vxi->vx_hlist, head); + atomic_inc(&vx_global_cactive); +} + +/* __unhash_vx_info() + + * remove the vxi from the global hash table + * requires the hash_lock to be held */ + +static inline void __unhash_vx_info(struct vx_info *vxi) +{ + unsigned long flags; + + vxd_assert_lock(&vx_info_hash_lock); + vxdprintk(VXD_CBIT(xid, 4), + "__unhash_vx_info: %p[#%d.%d.%d]", vxi, vxi->vx_id, + atomic_read(&vxi->vx_usecnt), atomic_read(&vxi->vx_tasks)); + vxh_unhash_vx_info(vxi); + + /* context must be hashed */ + BUG_ON(!vx_info_state(vxi, VXS_HASHED)); + /* but without tasks */ + BUG_ON(atomic_read(&vxi->vx_tasks)); + + vxi->vx_state &= ~VXS_HASHED; + hlist_del_init(&vxi->vx_hlist); + spin_lock_irqsave(&vx_info_inactive_lock, flags); + hlist_add_head(&vxi->vx_hlist, &vx_info_inactive); + spin_unlock_irqrestore(&vx_info_inactive_lock, flags); + atomic_dec(&vx_global_cactive); +} + + +/* __lookup_vx_info() + + * requires the hash_lock to be held + * doesn't increment the vx_refcnt */ + +static inline struct vx_info *__lookup_vx_info(xid_t xid) +{ + struct hlist_head *head = &vx_info_hash[__hashval(xid)]; + struct hlist_node *pos; + struct vx_info *vxi; + + vxd_assert_lock(&vx_info_hash_lock); + hlist_for_each(pos, head) { + vxi = hlist_entry(pos, struct vx_info, vx_hlist); + + if (vxi->vx_id == xid) + goto found; + } + vxi = NULL; +found: + vxdprintk(VXD_CBIT(xid, 0), + "__lookup_vx_info(#%u): %p[#%u]", + xid, vxi, vxi ? vxi->vx_id : 0); + vxh_lookup_vx_info(vxi, xid); + return vxi; +} + + +/* __create_vx_info() + + * create the requested context + * get(), claim() and hash it */ + +static struct vx_info *__create_vx_info(int id) +{ + struct vx_info *new, *vxi = NULL; + + vxdprintk(VXD_CBIT(xid, 1), "create_vx_info(%d)*", id); + + if (!(new = __alloc_vx_info(id))) + return ERR_PTR(-ENOMEM); + + /* required to make dynamic xids unique */ + spin_lock(&vx_info_hash_lock); + + /* static context requested */ + if ((vxi = __lookup_vx_info(id))) { + vxdprintk(VXD_CBIT(xid, 0), + "create_vx_info(%d) = %p (already there)", id, vxi); + if (vx_info_flags(vxi, VXF_STATE_SETUP, 0)) + vxi = ERR_PTR(-EBUSY); + else + vxi = ERR_PTR(-EEXIST); + goto out_unlock; + } + /* new context */ + vxdprintk(VXD_CBIT(xid, 0), + "create_vx_info(%d) = %p (new)", id, new); + claim_vx_info(new, NULL); + __hash_vx_info(get_vx_info(new)); + vxi = new, new = NULL; + +out_unlock: + spin_unlock(&vx_info_hash_lock); + vxh_create_vx_info(IS_ERR(vxi) ? 
NULL : vxi, id); + if (new) + __dealloc_vx_info(new); + return vxi; +} + + +/* exported stuff */ + + +void unhash_vx_info(struct vx_info *vxi) +{ + spin_lock(&vx_info_hash_lock); + __unhash_vx_info(vxi); + spin_unlock(&vx_info_hash_lock); + __shutdown_vx_info(vxi); + __wakeup_vx_info(vxi); +} + + +/* lookup_vx_info() + + * search for a vx_info and get() it + * negative id means current */ + +struct vx_info *lookup_vx_info(int id) +{ + struct vx_info *vxi = NULL; + + if (id < 0) { + vxi = get_vx_info(current_vx_info()); + } else if (id > 1) { + spin_lock(&vx_info_hash_lock); + vxi = get_vx_info(__lookup_vx_info(id)); + spin_unlock(&vx_info_hash_lock); + } + return vxi; +} + +/* xid_is_hashed() + + * verify that xid is still hashed */ + +int xid_is_hashed(xid_t xid) +{ + int hashed; + + spin_lock(&vx_info_hash_lock); + hashed = (__lookup_vx_info(xid) != NULL); + spin_unlock(&vx_info_hash_lock); + return hashed; +} + +#ifdef CONFIG_PROC_FS + +/* get_xid_list() + + * get a subset of hashed xids for proc + * assumes size is at least one */ + +int get_xid_list(int index, unsigned int *xids, int size) +{ + int hindex, nr_xids = 0; + + /* only show current and children */ + if (!vx_check(0, VS_ADMIN | VS_WATCH)) { + if (index > 0) + return 0; + xids[nr_xids] = vx_current_xid(); + return 1; + } + + for (hindex = 0; hindex < VX_HASH_SIZE; hindex++) { + struct hlist_head *head = &vx_info_hash[hindex]; + struct hlist_node *pos; + + spin_lock(&vx_info_hash_lock); + hlist_for_each(pos, head) { + struct vx_info *vxi; + + if (--index > 0) + continue; + + vxi = hlist_entry(pos, struct vx_info, vx_hlist); + xids[nr_xids] = vxi->vx_id; + if (++nr_xids >= size) { + spin_unlock(&vx_info_hash_lock); + goto out; + } + } + /* keep the lock time short */ + spin_unlock(&vx_info_hash_lock); + } +out: + return nr_xids; +} +#endif + +#ifdef CONFIG_VSERVER_DEBUG + +void dump_vx_info_inactive(int level) +{ + struct hlist_node *entry, *next; + + hlist_for_each_safe(entry, next, &vx_info_inactive) { + struct vx_info *vxi = + list_entry(entry, struct vx_info, vx_hlist); + + dump_vx_info(vxi, level); + } +} + +#endif + +#if 0 +int vx_migrate_user(struct task_struct *p, struct vx_info *vxi) +{ + struct user_struct *new_user, *old_user; + + if (!p || !vxi) + BUG(); + + if (vx_info_flags(vxi, VXF_INFO_PRIVATE, 0)) + return -EACCES; + + new_user = alloc_uid(vxi->vx_id, p->uid); + if (!new_user) + return -ENOMEM; + + old_user = p->user; + if (new_user != old_user) { + atomic_inc(&new_user->processes); + atomic_dec(&old_user->processes); + p->user = new_user; + } + free_uid(old_user); + return 0; +} +#endif + +#if 0 +void vx_mask_cap_bset(struct vx_info *vxi, struct task_struct *p) +{ + // p->cap_effective &= vxi->vx_cap_bset; + p->cap_effective = + cap_intersect(p->cap_effective, vxi->cap_bset); + // p->cap_inheritable &= vxi->vx_cap_bset; + p->cap_inheritable = + cap_intersect(p->cap_inheritable, vxi->cap_bset); + // p->cap_permitted &= vxi->vx_cap_bset; + p->cap_permitted = + cap_intersect(p->cap_permitted, vxi->cap_bset); +} +#endif + + +#include +#include + +static int vx_openfd_task(struct task_struct *tsk) +{ + struct files_struct *files = tsk->files; + struct fdtable *fdt; + const unsigned long *bptr; + int count, total; + + /* no rcu_read_lock() because of spin_lock() */ + spin_lock(&files->file_lock); + fdt = files_fdtable(files); + bptr = fdt->open_fds->fds_bits; + count = fdt->max_fds / (sizeof(unsigned long) * 8); + for (total = 0; count > 0; count--) { + if (*bptr) + total += hweight_long(*bptr); + bptr++; + } + 
spin_unlock(&files->file_lock); + return total; +} + + +/* for *space compatibility */ + +asmlinkage long sys_unshare(unsigned long); + +/* + * migrate task to new context + * gets vxi, puts old_vxi on change + * optionally unshares namespaces (hack) + */ + +int vx_migrate_task(struct task_struct *p, struct vx_info *vxi, int unshare) +{ + struct vx_info *old_vxi; + int ret = 0; + + if (!p || !vxi) + BUG(); + + vxdprintk(VXD_CBIT(xid, 5), + "vx_migrate_task(%p,%p[#%d.%d])", p, vxi, + vxi->vx_id, atomic_read(&vxi->vx_usecnt)); + + if (vx_info_flags(vxi, VXF_INFO_PRIVATE, 0) && + !vx_info_flags(vxi, VXF_STATE_SETUP, 0)) + return -EACCES; + + if (vx_info_state(vxi, VXS_SHUTDOWN)) + return -EFAULT; + + old_vxi = task_get_vx_info(p); + if (old_vxi == vxi) + goto out; + +// if (!(ret = vx_migrate_user(p, vxi))) { + { + int openfd; + + task_lock(p); + openfd = vx_openfd_task(p); + + if (old_vxi) { + atomic_dec(&old_vxi->cvirt.nr_threads); + atomic_dec(&old_vxi->cvirt.nr_running); + __rlim_dec(&old_vxi->limit, RLIMIT_NPROC); + /* FIXME: what about the struct files here? */ + __rlim_sub(&old_vxi->limit, VLIMIT_OPENFD, openfd); + /* account for the executable */ + __rlim_dec(&old_vxi->limit, VLIMIT_DENTRY); + } + atomic_inc(&vxi->cvirt.nr_threads); + atomic_inc(&vxi->cvirt.nr_running); + __rlim_inc(&vxi->limit, RLIMIT_NPROC); + /* FIXME: what about the struct files here? */ + __rlim_add(&vxi->limit, VLIMIT_OPENFD, openfd); + /* account for the executable */ + __rlim_inc(&vxi->limit, VLIMIT_DENTRY); + + if (old_vxi) { + release_vx_info(old_vxi, p); + clr_vx_info(&p->vx_info); + } + claim_vx_info(vxi, p); + set_vx_info(&p->vx_info, vxi); + p->xid = vxi->vx_id; + + vxdprintk(VXD_CBIT(xid, 5), + "moved task %p into vxi:%p[#%d]", + p, vxi, vxi->vx_id); + + // vx_mask_cap_bset(vxi, p); + task_unlock(p); + + /* hack for *spaces to provide compatibility */ + if (unshare) { + struct nsproxy *old_nsp, *new_nsp; + + ret = unshare_nsproxy_namespaces( + CLONE_NEWUTS | CLONE_NEWIPC | CLONE_NEWUSER, + &new_nsp, NULL); + if (ret) + goto out; + + old_nsp = xchg(&p->nsproxy, new_nsp); + vx_set_space(vxi, + CLONE_NEWUTS | CLONE_NEWIPC | CLONE_NEWUSER, 0); + put_nsproxy(old_nsp); + } + } +out: + put_vx_info(old_vxi); + return ret; +} + +int vx_set_reaper(struct vx_info *vxi, struct task_struct *p) +{ + struct task_struct *old_reaper; + + if (!vxi) + return -EINVAL; + + vxdprintk(VXD_CBIT(xid, 6), + "vx_set_reaper(%p[#%d],%p[#%d,%d])", + vxi, vxi->vx_id, p, p->xid, p->pid); + + old_reaper = vxi->vx_reaper; + if (old_reaper == p) + return 0; + + /* set new child reaper */ + get_task_struct(p); + vxi->vx_reaper = p; + put_task_struct(old_reaper); + return 0; +} + +int vx_set_init(struct vx_info *vxi, struct task_struct *p) +{ + if (!vxi) + return -EINVAL; + + vxdprintk(VXD_CBIT(xid, 6), + "vx_set_init(%p[#%d],%p[#%d,%d,%d])", + vxi, vxi->vx_id, p, p->xid, p->pid, p->tgid); + + vxi->vx_flags &= ~VXF_STATE_INIT; + // vxi->vx_initpid = p->tgid; + vxi->vx_initpid = p->pid; + return 0; +} + +void vx_exit_init(struct vx_info *vxi, struct task_struct *p, int code) +{ + vxdprintk(VXD_CBIT(xid, 6), + "vx_exit_init(%p[#%d],%p[#%d,%d,%d])", + vxi, vxi->vx_id, p, p->xid, p->pid, p->tgid); + + vxi->exit_code = code; + vxi->vx_initpid = 0; +} + + +void vx_set_persistent(struct vx_info *vxi) +{ + vxdprintk(VXD_CBIT(xid, 6), + "vx_set_persistent(%p[#%d])", vxi, vxi->vx_id); + + get_vx_info(vxi); + claim_vx_info(vxi, NULL); +} + +void vx_clear_persistent(struct vx_info *vxi) +{ + vxdprintk(VXD_CBIT(xid, 6), + 
"vx_clear_persistent(%p[#%d])", vxi, vxi->vx_id); + + release_vx_info(vxi, NULL); + put_vx_info(vxi); +} + +void vx_update_persistent(struct vx_info *vxi) +{ + if (vx_info_flags(vxi, VXF_PERSISTENT, 0)) + vx_set_persistent(vxi); + else + vx_clear_persistent(vxi); +} + + +/* task must be current or locked */ + +void exit_vx_info(struct task_struct *p, int code) +{ + struct vx_info *vxi = p->vx_info; + + if (vxi) { + atomic_dec(&vxi->cvirt.nr_threads); + vx_nproc_dec(p); + + vxi->exit_code = code; + release_vx_info(vxi, p); + } +} + +void exit_vx_info_early(struct task_struct *p, int code) +{ + struct vx_info *vxi = p->vx_info; + + if (vxi) { + if (vxi->vx_initpid == p->pid) + vx_exit_init(vxi, p, code); + if (vxi->vx_reaper == p) + vx_set_reaper(vxi, init_pid_ns.child_reaper); + } +} + + +/* vserver syscall commands below here */ + +/* taks xid and vx_info functions */ + +#include + + +int vc_task_xid(uint32_t id) +{ + xid_t xid; + + if (id) { + struct task_struct *tsk; + + read_lock(&tasklist_lock); + tsk = find_task_by_real_pid(id); + xid = (tsk) ? tsk->xid : -ESRCH; + read_unlock(&tasklist_lock); + } else + xid = vx_current_xid(); + return xid; +} + + +int vc_vx_info(struct vx_info *vxi, void __user *data) +{ + struct vcmd_vx_info_v0 vc_data; + + vc_data.xid = vxi->vx_id; + vc_data.initpid = vxi->vx_initpid; + + if (copy_to_user(data, &vc_data, sizeof(vc_data))) + return -EFAULT; + return 0; +} + + +int vc_ctx_stat(struct vx_info *vxi, void __user *data) +{ + struct vcmd_ctx_stat_v0 vc_data; + + vc_data.usecnt = atomic_read(&vxi->vx_usecnt); + vc_data.tasks = atomic_read(&vxi->vx_tasks); + + if (copy_to_user(data, &vc_data, sizeof(vc_data))) + return -EFAULT; + return 0; +} + + +/* context functions */ + +int vc_ctx_create(uint32_t xid, void __user *data) +{ + struct vcmd_ctx_create vc_data = { .flagword = VXF_INIT_SET }; + struct vx_info *new_vxi; + int ret; + + if (data && copy_from_user(&vc_data, data, sizeof(vc_data))) + return -EFAULT; + + if ((xid > MAX_S_CONTEXT) || (xid < 2)) + return -EINVAL; + + new_vxi = __create_vx_info(xid); + if (IS_ERR(new_vxi)) + return PTR_ERR(new_vxi); + + /* initial flags */ + new_vxi->vx_flags = vc_data.flagword; + + ret = -ENOEXEC; + if (vs_state_change(new_vxi, VSC_STARTUP)) + goto out; + + ret = vx_migrate_task(current, new_vxi, (!data)); + if (ret) + goto out; + + /* return context id on success */ + ret = new_vxi->vx_id; + + /* get a reference for persistent contexts */ + if ((vc_data.flagword & VXF_PERSISTENT)) + vx_set_persistent(new_vxi); +out: + release_vx_info(new_vxi, NULL); + put_vx_info(new_vxi); + return ret; +} + + +int vc_ctx_migrate(struct vx_info *vxi, void __user *data) +{ + struct vcmd_ctx_migrate vc_data = { .flagword = 0 }; + int ret; + + if (data && copy_from_user(&vc_data, data, sizeof(vc_data))) + return -EFAULT; + + ret = vx_migrate_task(current, vxi, 0); + if (ret) + return ret; + if (vc_data.flagword & VXM_SET_INIT) + ret = vx_set_init(vxi, current); + if (ret) + return ret; + if (vc_data.flagword & VXM_SET_REAPER) + ret = vx_set_reaper(vxi, current); + return ret; +} + + +int vc_get_cflags(struct vx_info *vxi, void __user *data) +{ + struct vcmd_ctx_flags_v0 vc_data; + + vc_data.flagword = vxi->vx_flags; + + /* special STATE flag handling */ + vc_data.mask = vs_mask_flags(~0ULL, vxi->vx_flags, VXF_ONE_TIME); + + if (copy_to_user(data, &vc_data, sizeof(vc_data))) + return -EFAULT; + return 0; +} + +int vc_set_cflags(struct vx_info *vxi, void __user *data) +{ + struct vcmd_ctx_flags_v0 vc_data; + uint64_t mask, trigger; + + 
if (copy_from_user(&vc_data, data, sizeof(vc_data))) + return -EFAULT; + + /* special STATE flag handling */ + mask = vs_mask_mask(vc_data.mask, vxi->vx_flags, VXF_ONE_TIME); + trigger = (mask & vxi->vx_flags) ^ (mask & vc_data.flagword); + + if (vxi == current_vx_info()) { + /* if (trigger & VXF_STATE_SETUP) + vx_mask_cap_bset(vxi, current); */ + if (trigger & VXF_STATE_INIT) { + int ret; + + ret = vx_set_init(vxi, current); + if (ret) + return ret; + ret = vx_set_reaper(vxi, current); + if (ret) + return ret; + } + } + + vxi->vx_flags = vs_mask_flags(vxi->vx_flags, + vc_data.flagword, mask); + if (trigger & VXF_PERSISTENT) + vx_update_persistent(vxi); + + return 0; +} + + +static inline uint64_t caps_from_cap_t(kernel_cap_t c) +{ + uint64_t v = c.cap[0] | ((uint64_t)c.cap[1] << 32); + + // printk("caps_from_cap_t(%08x:%08x) = %016llx\n", c.cap[1], c.cap[0], v); + return v; +} + +static inline kernel_cap_t cap_t_from_caps(uint64_t v) +{ + kernel_cap_t c = __cap_empty_set; + + c.cap[0] = v & 0xFFFFFFFF; + c.cap[1] = (v >> 32) & 0xFFFFFFFF; + + // printk("cap_t_from_caps(%016llx) = %08x:%08x\n", v, c.cap[1], c.cap[0]); + return c; +} + + +static int do_get_caps(struct vx_info *vxi, uint64_t *bcaps, uint64_t *ccaps) +{ + if (bcaps) + *bcaps = caps_from_cap_t(vxi->vx_bcaps); + if (ccaps) + *ccaps = vxi->vx_ccaps; + + return 0; +} + +int vc_get_ccaps(struct vx_info *vxi, void __user *data) +{ + struct vcmd_ctx_caps_v1 vc_data; + int ret; + + ret = do_get_caps(vxi, NULL, &vc_data.ccaps); + if (ret) + return ret; + vc_data.cmask = ~0ULL; + + if (copy_to_user(data, &vc_data, sizeof(vc_data))) + return -EFAULT; + return 0; +} + +static int do_set_caps(struct vx_info *vxi, + uint64_t bcaps, uint64_t bmask, uint64_t ccaps, uint64_t cmask) +{ + uint64_t bcold = caps_from_cap_t(vxi->vx_bcaps); + +#if 0 + printk("do_set_caps(%16llx, %16llx, %16llx, %16llx)\n", + bcaps, bmask, ccaps, cmask); +#endif + vxi->vx_bcaps = cap_t_from_caps( + vs_mask_flags(bcold, bcaps, bmask)); + vxi->vx_ccaps = vs_mask_flags(vxi->vx_ccaps, ccaps, cmask); + + return 0; +} + +int vc_set_ccaps(struct vx_info *vxi, void __user *data) +{ + struct vcmd_ctx_caps_v1 vc_data; + + if (copy_from_user(&vc_data, data, sizeof(vc_data))) + return -EFAULT; + + return do_set_caps(vxi, 0, 0, vc_data.ccaps, vc_data.cmask); +} + +int vc_get_bcaps(struct vx_info *vxi, void __user *data) +{ + struct vcmd_bcaps vc_data; + int ret; + + ret = do_get_caps(vxi, &vc_data.bcaps, NULL); + if (ret) + return ret; + vc_data.bmask = ~0ULL; + + if (copy_to_user(data, &vc_data, sizeof(vc_data))) + return -EFAULT; + return 0; +} + +int vc_set_bcaps(struct vx_info *vxi, void __user *data) +{ + struct vcmd_bcaps vc_data; + + if (copy_from_user(&vc_data, data, sizeof(vc_data))) + return -EFAULT; + + return do_set_caps(vxi, vc_data.bcaps, vc_data.bmask, 0, 0); +} + + +int vc_get_umask(struct vx_info *vxi, void __user *data) +{ + struct vcmd_umask vc_data; + + vc_data.umask = vxi->vx_umask; + vc_data.mask = ~0ULL; + + if (copy_to_user(data, &vc_data, sizeof(vc_data))) + return -EFAULT; + return 0; +} + +int vc_set_umask(struct vx_info *vxi, void __user *data) +{ + struct vcmd_umask vc_data; + + if (copy_from_user(&vc_data, data, sizeof(vc_data))) + return -EFAULT; + + vxi->vx_umask = vs_mask_flags(vxi->vx_umask, + vc_data.umask, vc_data.mask); + return 0; +} + + +int vc_get_badness(struct vx_info *vxi, void __user *data) +{ + struct vcmd_badness_v0 vc_data; + + vc_data.bias = vxi->vx_badness_bias; + + if (copy_to_user(data, &vc_data, sizeof(vc_data))) + return 
-EFAULT; + return 0; +} + +int vc_set_badness(struct vx_info *vxi, void __user *data) +{ + struct vcmd_badness_v0 vc_data; + + if (copy_from_user(&vc_data, data, sizeof(vc_data))) + return -EFAULT; + + vxi->vx_badness_bias = vc_data.bias; + return 0; +} + +#include + +EXPORT_SYMBOL_GPL(free_vx_info); + diff -Nur kernel-2.6.32.54/linux-2.6.32/kernel/vserver/cvirt.c kernel-2.6.32.54.vs/linux-2.6.32/kernel/vserver/cvirt.c --- kernel-2.6.32.54/linux-2.6.32/kernel/vserver/cvirt.c 1970-01-01 01:00:00.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/kernel/vserver/cvirt.c 2012-01-16 14:51:22.041408267 +0100 @@ -0,0 +1,304 @@ +/* + * linux/kernel/vserver/cvirt.c + * + * Virtual Server: Context Virtualization + * + * Copyright (C) 2004-2007 Herbert Pötzl + * + * V0.01 broken out from limit.c + * V0.02 added utsname stuff + * V0.03 changed vcmds to vxi arg + * + */ + +#include +#include +#include +#include +#include + +#include + + +void vx_vsi_uptime(struct timespec *uptime, struct timespec *idle) +{ + struct vx_info *vxi = current_vx_info(); + + set_normalized_timespec(uptime, + uptime->tv_sec - vxi->cvirt.bias_uptime.tv_sec, + uptime->tv_nsec - vxi->cvirt.bias_uptime.tv_nsec); + if (!idle) + return; + set_normalized_timespec(idle, + idle->tv_sec - vxi->cvirt.bias_idle.tv_sec, + idle->tv_nsec - vxi->cvirt.bias_idle.tv_nsec); + return; +} + +uint64_t vx_idle_jiffies(void) +{ + return init_task.utime + init_task.stime; +} + + + +static inline uint32_t __update_loadavg(uint32_t load, + int wsize, int delta, int n) +{ + unsigned long long calc, prev; + + /* just set it to n */ + if (unlikely(delta >= wsize)) + return (n << FSHIFT); + + calc = delta * n; + calc <<= FSHIFT; + prev = (wsize - delta); + prev *= load; + calc += prev; + do_div(calc, wsize); + return calc; +} + + +void vx_update_load(struct vx_info *vxi) +{ + uint32_t now, last, delta; + unsigned int nr_running, nr_uninterruptible; + unsigned int total; + unsigned long flags; + + spin_lock_irqsave(&vxi->cvirt.load_lock, flags); + + now = jiffies; + last = vxi->cvirt.load_last; + delta = now - last; + + if (delta < 5*HZ) + goto out; + + nr_running = atomic_read(&vxi->cvirt.nr_running); + nr_uninterruptible = atomic_read(&vxi->cvirt.nr_uninterruptible); + total = nr_running + nr_uninterruptible; + + vxi->cvirt.load[0] = __update_loadavg(vxi->cvirt.load[0], + 60*HZ, delta, total); + vxi->cvirt.load[1] = __update_loadavg(vxi->cvirt.load[1], + 5*60*HZ, delta, total); + vxi->cvirt.load[2] = __update_loadavg(vxi->cvirt.load[2], + 15*60*HZ, delta, total); + + vxi->cvirt.load_last = now; +out: + atomic_inc(&vxi->cvirt.load_updates); + spin_unlock_irqrestore(&vxi->cvirt.load_lock, flags); +} + + +/* + * Commands to do_syslog: + * + * 0 -- Close the log. Currently a NOP. + * 1 -- Open the log. Currently a NOP. + * 2 -- Read from the log. + * 3 -- Read all messages remaining in the ring buffer. + * 4 -- Read and clear all messages remaining in the ring buffer + * 5 -- Clear ring buffer. 
+ * 6 -- Disable printk's to console + * 7 -- Enable printk's to console + * 8 -- Set level of messages printed to console + * 9 -- Return number of unread characters in the log buffer + * 10 -- Return size of the log buffer + */ +int vx_do_syslog(int type, char __user *buf, int len) +{ + int error = 0; + int do_clear = 0; + struct vx_info *vxi = current_vx_info(); + struct _vx_syslog *log; + + if (!vxi) + return -EINVAL; + log = &vxi->cvirt.syslog; + + switch (type) { + case 0: /* Close log */ + case 1: /* Open log */ + break; + case 2: /* Read from log */ + error = wait_event_interruptible(log->log_wait, + (log->log_start - log->log_end)); + if (error) + break; + spin_lock_irq(&log->logbuf_lock); + spin_unlock_irq(&log->logbuf_lock); + break; + case 4: /* Read/clear last kernel messages */ + do_clear = 1; + /* fall through */ + case 3: /* Read last kernel messages */ + return 0; + + case 5: /* Clear ring buffer */ + return 0; + + case 6: /* Disable logging to console */ + case 7: /* Enable logging to console */ + case 8: /* Set level of messages printed to console */ + break; + + case 9: /* Number of chars in the log buffer */ + return 0; + case 10: /* Size of the log buffer */ + return 0; + default: + error = -EINVAL; + break; + } + return error; +} + + +/* virtual host info names */ + +static char *vx_vhi_name(struct vx_info *vxi, int id) +{ + struct nsproxy *nsproxy; + struct uts_namespace *uts; + + if (id == VHIN_CONTEXT) + return vxi->vx_name; + + nsproxy = vxi->vx_nsproxy[0]; + if (!nsproxy) + return NULL; + + uts = nsproxy->uts_ns; + if (!uts) + return NULL; + + switch (id) { + case VHIN_SYSNAME: + return uts->name.sysname; + case VHIN_NODENAME: + return uts->name.nodename; + case VHIN_RELEASE: + return uts->name.release; + case VHIN_VERSION: + return uts->name.version; + case VHIN_MACHINE: + return uts->name.machine; + case VHIN_DOMAINNAME: + return uts->name.domainname; + default: + return NULL; + } + return NULL; +} + +int vc_set_vhi_name(struct vx_info *vxi, void __user *data) +{ + struct vcmd_vhi_name_v0 vc_data; + char *name; + + if (copy_from_user(&vc_data, data, sizeof(vc_data))) + return -EFAULT; + + name = vx_vhi_name(vxi, vc_data.field); + if (!name) + return -EINVAL; + + memcpy(name, vc_data.name, 65); + return 0; +} + +int vc_get_vhi_name(struct vx_info *vxi, void __user *data) +{ + struct vcmd_vhi_name_v0 vc_data; + char *name; + + if (copy_from_user(&vc_data, data, sizeof(vc_data))) + return -EFAULT; + + name = vx_vhi_name(vxi, vc_data.field); + if (!name) + return -EINVAL; + + memcpy(vc_data.name, name, 65); + if (copy_to_user(data, &vc_data, sizeof(vc_data))) + return -EFAULT; + return 0; +} + + +int vc_virt_stat(struct vx_info *vxi, void __user *data) +{ + struct vcmd_virt_stat_v0 vc_data; + struct _vx_cvirt *cvirt = &vxi->cvirt; + struct timespec uptime; + + do_posix_clock_monotonic_gettime(&uptime); + set_normalized_timespec(&uptime, + uptime.tv_sec - cvirt->bias_uptime.tv_sec, + uptime.tv_nsec - cvirt->bias_uptime.tv_nsec); + + vc_data.offset = timeval_to_ns(&cvirt->bias_tv); + vc_data.uptime = timespec_to_ns(&uptime); + vc_data.nr_threads = atomic_read(&cvirt->nr_threads); + vc_data.nr_running = atomic_read(&cvirt->nr_running); + vc_data.nr_uninterruptible = atomic_read(&cvirt->nr_uninterruptible); + vc_data.nr_onhold = atomic_read(&cvirt->nr_onhold); + vc_data.nr_forks = atomic_read(&cvirt->total_forks); + vc_data.load[0] = cvirt->load[0]; + vc_data.load[1] = cvirt->load[1]; + vc_data.load[2] = cvirt->load[2]; + + if (copy_to_user(data, &vc_data, 
sizeof(vc_data))) + return -EFAULT; + return 0; +} + + +#ifdef CONFIG_VSERVER_VTIME + +/* virtualized time base */ + +void vx_gettimeofday(struct timeval *tv) +{ + struct vx_info *vxi; + + do_gettimeofday(tv); + if (!vx_flags(VXF_VIRT_TIME, 0)) + return; + + vxi = current_vx_info(); + tv->tv_sec += vxi->cvirt.bias_tv.tv_sec; + tv->tv_usec += vxi->cvirt.bias_tv.tv_usec; + + if (tv->tv_usec >= USEC_PER_SEC) { + tv->tv_sec++; + tv->tv_usec -= USEC_PER_SEC; + } else if (tv->tv_usec < 0) { + tv->tv_sec--; + tv->tv_usec += USEC_PER_SEC; + } +} + +int vx_settimeofday(struct timespec *ts) +{ + struct timeval tv; + struct vx_info *vxi; + + if (!vx_flags(VXF_VIRT_TIME, 0)) + return do_settimeofday(ts); + + do_gettimeofday(&tv); + vxi = current_vx_info(); + vxi->cvirt.bias_tv.tv_sec = ts->tv_sec - tv.tv_sec; + vxi->cvirt.bias_tv.tv_usec = + (ts->tv_nsec/NSEC_PER_USEC) - tv.tv_usec; + return 0; +} + +#endif + diff -Nur kernel-2.6.32.54/linux-2.6.32/kernel/vserver/cvirt_init.h kernel-2.6.32.54.vs/linux-2.6.32/kernel/vserver/cvirt_init.h --- kernel-2.6.32.54/linux-2.6.32/kernel/vserver/cvirt_init.h 1970-01-01 01:00:00.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/kernel/vserver/cvirt_init.h 2012-01-16 14:51:22.041408267 +0100 @@ -0,0 +1,69 @@ + + +extern uint64_t vx_idle_jiffies(void); + +static inline void vx_info_init_cvirt(struct _vx_cvirt *cvirt) +{ + uint64_t idle_jiffies = vx_idle_jiffies(); + uint64_t nsuptime; + + do_posix_clock_monotonic_gettime(&cvirt->bias_uptime); + nsuptime = (unsigned long long)cvirt->bias_uptime.tv_sec + * NSEC_PER_SEC + cvirt->bias_uptime.tv_nsec; + cvirt->bias_clock = nsec_to_clock_t(nsuptime); + cvirt->bias_tv.tv_sec = 0; + cvirt->bias_tv.tv_usec = 0; + + jiffies_to_timespec(idle_jiffies, &cvirt->bias_idle); + atomic_set(&cvirt->nr_threads, 0); + atomic_set(&cvirt->nr_running, 0); + atomic_set(&cvirt->nr_uninterruptible, 0); + atomic_set(&cvirt->nr_onhold, 0); + + spin_lock_init(&cvirt->load_lock); + cvirt->load_last = jiffies; + atomic_set(&cvirt->load_updates, 0); + cvirt->load[0] = 0; + cvirt->load[1] = 0; + cvirt->load[2] = 0; + atomic_set(&cvirt->total_forks, 0); + + spin_lock_init(&cvirt->syslog.logbuf_lock); + init_waitqueue_head(&cvirt->syslog.log_wait); + cvirt->syslog.log_start = 0; + cvirt->syslog.log_end = 0; + cvirt->syslog.con_start = 0; + cvirt->syslog.logged_chars = 0; +} + +static inline +void vx_info_init_cvirt_pc(struct _vx_cvirt_pc *cvirt_pc, int cpu) +{ + // cvirt_pc->cpustat = { 0 }; +} + +static inline void vx_info_exit_cvirt(struct _vx_cvirt *cvirt) +{ + int value; + + vxwprintk_xid((value = atomic_read(&cvirt->nr_threads)), + "!!! cvirt: %p[nr_threads] = %d on exit.", + cvirt, value); + vxwprintk_xid((value = atomic_read(&cvirt->nr_running)), + "!!! cvirt: %p[nr_running] = %d on exit.", + cvirt, value); + vxwprintk_xid((value = atomic_read(&cvirt->nr_uninterruptible)), + "!!! cvirt: %p[nr_uninterruptible] = %d on exit.", + cvirt, value); + vxwprintk_xid((value = atomic_read(&cvirt->nr_onhold)), + "!!! 
cvirt: %p[nr_onhold] = %d on exit.", + cvirt, value); + return; +} + +static inline +void vx_info_exit_cvirt_pc(struct _vx_cvirt_pc *cvirt_pc, int cpu) +{ + return; +} + diff -Nur kernel-2.6.32.54/linux-2.6.32/kernel/vserver/cvirt_proc.h kernel-2.6.32.54.vs/linux-2.6.32/kernel/vserver/cvirt_proc.h --- kernel-2.6.32.54/linux-2.6.32/kernel/vserver/cvirt_proc.h 1970-01-01 01:00:00.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/kernel/vserver/cvirt_proc.h 2012-01-16 14:51:22.041408267 +0100 @@ -0,0 +1,135 @@ +#ifndef _VX_CVIRT_PROC_H +#define _VX_CVIRT_PROC_H + +#include +#include +#include +#include +#include + + +static inline +int vx_info_proc_nsproxy(struct nsproxy *nsproxy, char *buffer) +{ + struct mnt_namespace *ns; + struct uts_namespace *uts; + struct ipc_namespace *ipc; + struct path path; + char *pstr, *root; + int length = 0; + + if (!nsproxy) + goto out; + + length += sprintf(buffer + length, + "NSProxy:\t%p [%p,%p,%p]\n", + nsproxy, nsproxy->mnt_ns, + nsproxy->uts_ns, nsproxy->ipc_ns); + + ns = nsproxy->mnt_ns; + if (!ns) + goto skip_ns; + + pstr = kmalloc(PATH_MAX, GFP_KERNEL); + if (!pstr) + goto skip_ns; + + path.mnt = ns->root; + path.dentry = ns->root->mnt_root; + root = d_path(&path, pstr, PATH_MAX - 2); + length += sprintf(buffer + length, + "Namespace:\t%p [#%u]\n" + "RootPath:\t%s\n", + ns, atomic_read(&ns->count), + root); + kfree(pstr); +skip_ns: + + uts = nsproxy->uts_ns; + if (!uts) + goto skip_uts; + + length += sprintf(buffer + length, + "SysName:\t%.*s\n" + "NodeName:\t%.*s\n" + "Release:\t%.*s\n" + "Version:\t%.*s\n" + "Machine:\t%.*s\n" + "DomainName:\t%.*s\n", + __NEW_UTS_LEN, uts->name.sysname, + __NEW_UTS_LEN, uts->name.nodename, + __NEW_UTS_LEN, uts->name.release, + __NEW_UTS_LEN, uts->name.version, + __NEW_UTS_LEN, uts->name.machine, + __NEW_UTS_LEN, uts->name.domainname); +skip_uts: + + ipc = nsproxy->ipc_ns; + if (!ipc) + goto skip_ipc; + + length += sprintf(buffer + length, + "SEMS:\t\t%d %d %d %d %d\n" + "MSG:\t\t%d %d %d\n" + "SHM:\t\t%lu %lu %d %d\n", + ipc->sem_ctls[0], ipc->sem_ctls[1], + ipc->sem_ctls[2], ipc->sem_ctls[3], + ipc->used_sems, + ipc->msg_ctlmax, ipc->msg_ctlmnb, ipc->msg_ctlmni, + (unsigned long)ipc->shm_ctlmax, + (unsigned long)ipc->shm_ctlall, + ipc->shm_ctlmni, ipc->shm_tot); +skip_ipc: +out: + return length; +} + + +#include + +#define LOAD_INT(x) ((x) >> FSHIFT) +#define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1 - 1)) * 100) + +static inline +int vx_info_proc_cvirt(struct _vx_cvirt *cvirt, char *buffer) +{ + int length = 0; + int a, b, c; + + length += sprintf(buffer + length, + "BiasUptime:\t%lu.%02lu\n", + (unsigned long)cvirt->bias_uptime.tv_sec, + (cvirt->bias_uptime.tv_nsec / (NSEC_PER_SEC / 100))); + + a = cvirt->load[0] + (FIXED_1 / 200); + b = cvirt->load[1] + (FIXED_1 / 200); + c = cvirt->load[2] + (FIXED_1 / 200); + length += sprintf(buffer + length, + "nr_threads:\t%d\n" + "nr_running:\t%d\n" + "nr_unintr:\t%d\n" + "nr_onhold:\t%d\n" + "load_updates:\t%d\n" + "loadavg:\t%d.%02d %d.%02d %d.%02d\n" + "total_forks:\t%d\n", + atomic_read(&cvirt->nr_threads), + atomic_read(&cvirt->nr_running), + atomic_read(&cvirt->nr_uninterruptible), + atomic_read(&cvirt->nr_onhold), + atomic_read(&cvirt->load_updates), + LOAD_INT(a), LOAD_FRAC(a), + LOAD_INT(b), LOAD_FRAC(b), + LOAD_INT(c), LOAD_FRAC(c), + atomic_read(&cvirt->total_forks)); + return length; +} + +static inline +int vx_info_proc_cvirt_pc(struct _vx_cvirt_pc *cvirt_pc, + char *buffer, int cpu) +{ + int length = 0; + return length; +} + +#endif /* _VX_CVIRT_PROC_H */ diff 
-Nur kernel-2.6.32.54/linux-2.6.32/kernel/vserver/debug.c kernel-2.6.32.54.vs/linux-2.6.32/kernel/vserver/debug.c --- kernel-2.6.32.54/linux-2.6.32/kernel/vserver/debug.c 1970-01-01 01:00:00.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/kernel/vserver/debug.c 2012-01-16 14:51:22.045408253 +0100 @@ -0,0 +1,32 @@ +/* + * kernel/vserver/debug.c + * + * Copyright (C) 2005-2007 Herbert Pötzl + * + * V0.01 vx_info dump support + * + */ + +#include + +#include + + +void dump_vx_info(struct vx_info *vxi, int level) +{ + printk("vx_info %p[#%d, %d.%d, %4x]\n", vxi, vxi->vx_id, + atomic_read(&vxi->vx_usecnt), + atomic_read(&vxi->vx_tasks), + vxi->vx_state); + if (level > 0) { + __dump_vx_limit(&vxi->limit); + __dump_vx_sched(&vxi->sched); + __dump_vx_cvirt(&vxi->cvirt); + __dump_vx_cacct(&vxi->cacct); + } + printk("---\n"); +} + + +EXPORT_SYMBOL_GPL(dump_vx_info); + diff -Nur kernel-2.6.32.54/linux-2.6.32/kernel/vserver/device.c kernel-2.6.32.54.vs/linux-2.6.32/kernel/vserver/device.c --- kernel-2.6.32.54/linux-2.6.32/kernel/vserver/device.c 1970-01-01 01:00:00.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/kernel/vserver/device.c 2012-01-16 14:51:22.045408253 +0100 @@ -0,0 +1,443 @@ +/* + * linux/kernel/vserver/device.c + * + * Linux-VServer: Device Support + * + * Copyright (C) 2006 Herbert Pötzl + * Copyright (C) 2007 Daniel Hokka Zakrisson + * + * V0.01 device mapping basics + * V0.02 added defaults + * + */ + +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + + +#define DMAP_HASH_BITS 4 + + +struct vs_mapping { + union { + struct hlist_node hlist; + struct list_head list; + } u; +#define dm_hlist u.hlist +#define dm_list u.list + xid_t xid; + dev_t device; + struct vx_dmap_target target; +}; + + +static struct hlist_head dmap_main_hash[1 << DMAP_HASH_BITS]; + +static spinlock_t dmap_main_hash_lock = SPIN_LOCK_UNLOCKED; + +static struct vx_dmap_target dmap_defaults[2] = { + { .flags = DATTR_OPEN }, + { .flags = DATTR_OPEN }, +}; + + +struct kmem_cache *dmap_cachep __read_mostly; + +int __init dmap_cache_init(void) +{ + dmap_cachep = kmem_cache_create("dmap_cache", + sizeof(struct vs_mapping), 0, + SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL); + return 0; +} + +__initcall(dmap_cache_init); + + +static inline unsigned int __hashval(dev_t dev, int bits) +{ + return hash_long((unsigned long)dev, bits); +} + + +/* __hash_mapping() + * add the mapping to the hash table + */ +static inline void __hash_mapping(struct vx_info *vxi, struct vs_mapping *vdm) +{ + spinlock_t *hash_lock = &dmap_main_hash_lock; + struct hlist_head *head, *hash = dmap_main_hash; + int device = vdm->device; + + spin_lock(hash_lock); + vxdprintk(VXD_CBIT(misc, 8), "__hash_mapping: %p[#%d] %08x:%08x", + vxi, vxi ? 
vxi->vx_id : 0, device, vdm->target.target); + + head = &hash[__hashval(device, DMAP_HASH_BITS)]; + hlist_add_head(&vdm->dm_hlist, head); + spin_unlock(hash_lock); +} + + +static inline int __mode_to_default(umode_t mode) +{ + switch (mode) { + case S_IFBLK: + return 0; + case S_IFCHR: + return 1; + default: + BUG(); + } +} + + +/* __set_default() + * set a default + */ +static inline void __set_default(struct vx_info *vxi, umode_t mode, + struct vx_dmap_target *vdmt) +{ + spinlock_t *hash_lock = &dmap_main_hash_lock; + spin_lock(hash_lock); + + if (vxi) + vxi->dmap.targets[__mode_to_default(mode)] = *vdmt; + else + dmap_defaults[__mode_to_default(mode)] = *vdmt; + + + spin_unlock(hash_lock); + + vxdprintk(VXD_CBIT(misc, 8), "__set_default: %p[#%u] %08x %04x", + vxi, vxi ? vxi->vx_id : 0, vdmt->target, vdmt->flags); +} + + +/* __remove_default() + * remove a default + */ +static inline int __remove_default(struct vx_info *vxi, umode_t mode) +{ + spinlock_t *hash_lock = &dmap_main_hash_lock; + spin_lock(hash_lock); + + if (vxi) + vxi->dmap.targets[__mode_to_default(mode)].flags = 0; + else /* remove == reset */ + dmap_defaults[__mode_to_default(mode)].flags = DATTR_OPEN | mode; + + spin_unlock(hash_lock); + return 0; +} + + +/* __find_mapping() + * find a mapping in the hash table + * + * caller must hold hash_lock + */ +static inline int __find_mapping(xid_t xid, dev_t device, umode_t mode, + struct vs_mapping **local, struct vs_mapping **global) +{ + struct hlist_head *hash = dmap_main_hash; + struct hlist_head *head = &hash[__hashval(device, DMAP_HASH_BITS)]; + struct hlist_node *pos; + struct vs_mapping *vdm; + + *local = NULL; + if (global) + *global = NULL; + + hlist_for_each(pos, head) { + vdm = hlist_entry(pos, struct vs_mapping, dm_hlist); + + if ((vdm->device == device) && + !((vdm->target.flags ^ mode) & S_IFMT)) { + if (vdm->xid == xid) { + *local = vdm; + return 1; + } else if (global && vdm->xid == 0) + *global = vdm; + } + } + + if (global && *global) + return 0; + else + return -ENOENT; +} + + +/* __lookup_mapping() + * find a mapping and store the result in target and flags + */ +static inline int __lookup_mapping(struct vx_info *vxi, + dev_t device, dev_t *target, int *flags, umode_t mode) +{ + spinlock_t *hash_lock = &dmap_main_hash_lock; + struct vs_mapping *vdm, *global; + struct vx_dmap_target *vdmt; + int ret = 0; + xid_t xid = vxi->vx_id; + int index; + + spin_lock(hash_lock); + if (__find_mapping(xid, device, mode, &vdm, &global) > 0) { + ret = 1; + vdmt = &vdm->target; + goto found; + } + + index = __mode_to_default(mode); + if (vxi && vxi->dmap.targets[index].flags) { + ret = 2; + vdmt = &vxi->dmap.targets[index]; + } else if (global) { + ret = 3; + vdmt = &global->target; + goto found; + } else { + ret = 4; + vdmt = &dmap_defaults[index]; + } + +found: + if (target && (vdmt->flags & DATTR_REMAP)) + *target = vdmt->target; + else if (target) + *target = device; + if (flags) + *flags = vdmt->flags; + + spin_unlock(hash_lock); + + return ret; +} + + +/* __remove_mapping() + * remove a mapping from the hash table + */ +static inline int __remove_mapping(struct vx_info *vxi, dev_t device, + umode_t mode) +{ + spinlock_t *hash_lock = &dmap_main_hash_lock; + struct vs_mapping *vdm = NULL; + int ret = 0; + + spin_lock(hash_lock); + + ret = __find_mapping((vxi ? vxi->vx_id : 0), device, mode, &vdm, + NULL); + vxdprintk(VXD_CBIT(misc, 8), "__remove_mapping: %p[#%d] %08x %04x", + vxi, vxi ? 
vxi->vx_id : 0, device, mode); + if (ret < 0) + goto out; + hlist_del(&vdm->dm_hlist); + +out: + spin_unlock(hash_lock); + if (vdm) + kmem_cache_free(dmap_cachep, vdm); + return ret; +} + + + +int vs_map_device(struct vx_info *vxi, + dev_t device, dev_t *target, umode_t mode) +{ + int ret, flags = DATTR_MASK; + + if (!vxi) { + if (target) + *target = device; + goto out; + } + ret = __lookup_mapping(vxi, device, target, &flags, mode); + vxdprintk(VXD_CBIT(misc, 8), "vs_map_device: %08x target: %08x flags: %04x mode: %04x mapped=%d", + device, target ? *target : 0, flags, mode, ret); +out: + return (flags & DATTR_MASK); +} + + + +static int do_set_mapping(struct vx_info *vxi, + dev_t device, dev_t target, int flags, umode_t mode) +{ + if (device) { + struct vs_mapping *new; + + new = kmem_cache_alloc(dmap_cachep, GFP_KERNEL); + if (!new) + return -ENOMEM; + + INIT_HLIST_NODE(&new->dm_hlist); + new->device = device; + new->target.target = target; + new->target.flags = flags | mode; + new->xid = (vxi ? vxi->vx_id : 0); + + vxdprintk(VXD_CBIT(misc, 8), "do_set_mapping: %08x target: %08x flags: %04x", device, target, flags); + __hash_mapping(vxi, new); + } else { + struct vx_dmap_target new = { + .target = target, + .flags = flags | mode, + }; + __set_default(vxi, mode, &new); + } + return 0; +} + + +static int do_unset_mapping(struct vx_info *vxi, + dev_t device, dev_t target, int flags, umode_t mode) +{ + int ret = -EINVAL; + + if (device) { + ret = __remove_mapping(vxi, device, mode); + if (ret < 0) + goto out; + } else { + ret = __remove_default(vxi, mode); + if (ret < 0) + goto out; + } + +out: + return ret; +} + + +static inline int __user_device(const char __user *name, dev_t *dev, + umode_t *mode) +{ + struct nameidata nd; + int ret; + + if (!name) { + *dev = 0; + return 0; + } + ret = user_lpath(name, &nd.path); + if (ret) + return ret; + if (nd.path.dentry->d_inode) { + *dev = nd.path.dentry->d_inode->i_rdev; + *mode = nd.path.dentry->d_inode->i_mode; + } + path_put(&nd.path); + return 0; +} + +static inline int __mapping_mode(dev_t device, dev_t target, + umode_t device_mode, umode_t target_mode, umode_t *mode) +{ + if (device) + *mode = device_mode & S_IFMT; + else if (target) + *mode = target_mode & S_IFMT; + else + return -EINVAL; + + /* if both given, device and target mode have to match */ + if (device && target && + ((device_mode ^ target_mode) & S_IFMT)) + return -EINVAL; + return 0; +} + + +static inline int do_mapping(struct vx_info *vxi, const char __user *device_path, + const char __user *target_path, int flags, int set) +{ + dev_t device = ~0, target = ~0; + umode_t device_mode = 0, target_mode = 0, mode; + int ret; + + ret = __user_device(device_path, &device, &device_mode); + if (ret) + return ret; + ret = __user_device(target_path, &target, &target_mode); + if (ret) + return ret; + + ret = __mapping_mode(device, target, + device_mode, target_mode, &mode); + if (ret) + return ret; + + if (set) + return do_set_mapping(vxi, device, target, + flags, mode); + else + return do_unset_mapping(vxi, device, target, + flags, mode); +} + + +int vc_set_mapping(struct vx_info *vxi, void __user *data) +{ + struct vcmd_set_mapping_v0 vc_data; + + if (copy_from_user(&vc_data, data, sizeof(vc_data))) + return -EFAULT; + + return do_mapping(vxi, vc_data.device, vc_data.target, + vc_data.flags, 1); +} + +int vc_unset_mapping(struct vx_info *vxi, void __user *data) +{ + struct vcmd_set_mapping_v0 vc_data; + + if (copy_from_user(&vc_data, data, sizeof(vc_data))) + return -EFAULT; + + return 
do_mapping(vxi, vc_data.device, vc_data.target, + vc_data.flags, 0); +} + + +#ifdef CONFIG_COMPAT + +int vc_set_mapping_x32(struct vx_info *vxi, void __user *data) +{ + struct vcmd_set_mapping_v0_x32 vc_data; + + if (copy_from_user(&vc_data, data, sizeof(vc_data))) + return -EFAULT; + + return do_mapping(vxi, compat_ptr(vc_data.device_ptr), + compat_ptr(vc_data.target_ptr), vc_data.flags, 1); +} + +int vc_unset_mapping_x32(struct vx_info *vxi, void __user *data) +{ + struct vcmd_set_mapping_v0_x32 vc_data; + + if (copy_from_user(&vc_data, data, sizeof(vc_data))) + return -EFAULT; + + return do_mapping(vxi, compat_ptr(vc_data.device_ptr), + compat_ptr(vc_data.target_ptr), vc_data.flags, 0); +} + +#endif /* CONFIG_COMPAT */ + + diff -Nur kernel-2.6.32.54/linux-2.6.32/kernel/vserver/dlimit.c kernel-2.6.32.54.vs/linux-2.6.32/kernel/vserver/dlimit.c --- kernel-2.6.32.54/linux-2.6.32/kernel/vserver/dlimit.c 1970-01-01 01:00:00.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/kernel/vserver/dlimit.c 2012-01-16 14:51:22.045408253 +0100 @@ -0,0 +1,529 @@ +/* + * linux/kernel/vserver/dlimit.c + * + * Virtual Server: Context Disk Limits + * + * Copyright (C) 2004-2009 Herbert Pötzl + * + * V0.01 initial version + * V0.02 compat32 splitup + * V0.03 extended interface + * + */ + +#include +#include +#include +#include +#include +#include + +#include + +/* __alloc_dl_info() + + * allocate an initialized dl_info struct + * doesn't make it visible (hash) */ + +static struct dl_info *__alloc_dl_info(struct super_block *sb, tag_t tag) +{ + struct dl_info *new = NULL; + + vxdprintk(VXD_CBIT(dlim, 5), + "alloc_dl_info(%p,%d)*", sb, tag); + + /* would this benefit from a slab cache? */ + new = kmalloc(sizeof(struct dl_info), GFP_KERNEL); + if (!new) + return 0; + + memset(new, 0, sizeof(struct dl_info)); + new->dl_tag = tag; + new->dl_sb = sb; + INIT_RCU_HEAD(&new->dl_rcu); + INIT_HLIST_NODE(&new->dl_hlist); + spin_lock_init(&new->dl_lock); + atomic_set(&new->dl_refcnt, 0); + atomic_set(&new->dl_usecnt, 0); + + /* rest of init goes here */ + + vxdprintk(VXD_CBIT(dlim, 4), + "alloc_dl_info(%p,%d) = %p", sb, tag, new); + return new; +} + +/* __dealloc_dl_info() + + * final disposal of dl_info */ + +static void __dealloc_dl_info(struct dl_info *dli) +{ + vxdprintk(VXD_CBIT(dlim, 4), + "dealloc_dl_info(%p)", dli); + + dli->dl_hlist.next = LIST_POISON1; + dli->dl_tag = -1; + dli->dl_sb = 0; + + BUG_ON(atomic_read(&dli->dl_usecnt)); + BUG_ON(atomic_read(&dli->dl_refcnt)); + + kfree(dli); +} + + +/* hash table for dl_info hash */ + +#define DL_HASH_SIZE 13 + +struct hlist_head dl_info_hash[DL_HASH_SIZE]; + +static spinlock_t dl_info_hash_lock = SPIN_LOCK_UNLOCKED; + + +static inline unsigned int __hashval(struct super_block *sb, tag_t tag) +{ + return ((tag ^ (unsigned long)sb) % DL_HASH_SIZE); +} + + + +/* __hash_dl_info() + + * add the dli to the global hash table + * requires the hash_lock to be held */ + +static inline void __hash_dl_info(struct dl_info *dli) +{ + struct hlist_head *head; + + vxdprintk(VXD_CBIT(dlim, 6), + "__hash_dl_info: %p[#%d]", dli, dli->dl_tag); + get_dl_info(dli); + head = &dl_info_hash[__hashval(dli->dl_sb, dli->dl_tag)]; + hlist_add_head_rcu(&dli->dl_hlist, head); +} + +/* __unhash_dl_info() + + * remove the dli from the global hash table + * requires the hash_lock to be held */ + +static inline void __unhash_dl_info(struct dl_info *dli) +{ + vxdprintk(VXD_CBIT(dlim, 6), + "__unhash_dl_info: %p[#%d]", dli, dli->dl_tag); + hlist_del_rcu(&dli->dl_hlist); + put_dl_info(dli); +} + + +/* 
__lookup_dl_info() + + * requires the rcu_read_lock() + * doesn't increment the dl_refcnt */ + +static inline struct dl_info *__lookup_dl_info(struct super_block *sb, tag_t tag) +{ + struct hlist_head *head = &dl_info_hash[__hashval(sb, tag)]; + struct hlist_node *pos; + struct dl_info *dli; + + hlist_for_each_entry_rcu(dli, pos, head, dl_hlist) { + + if (dli->dl_tag == tag && dli->dl_sb == sb) { + return dli; + } + } + return NULL; +} + + +struct dl_info *locate_dl_info(struct super_block *sb, tag_t tag) +{ + struct dl_info *dli; + + rcu_read_lock(); + dli = get_dl_info(__lookup_dl_info(sb, tag)); + vxdprintk(VXD_CBIT(dlim, 7), + "locate_dl_info(%p,#%d) = %p", sb, tag, dli); + rcu_read_unlock(); + return dli; +} + +void rcu_free_dl_info(struct rcu_head *head) +{ + struct dl_info *dli = container_of(head, struct dl_info, dl_rcu); + int usecnt, refcnt; + + BUG_ON(!dli || !head); + + usecnt = atomic_read(&dli->dl_usecnt); + BUG_ON(usecnt < 0); + + refcnt = atomic_read(&dli->dl_refcnt); + BUG_ON(refcnt < 0); + + vxdprintk(VXD_CBIT(dlim, 3), + "rcu_free_dl_info(%p)", dli); + if (!usecnt) + __dealloc_dl_info(dli); + else + printk("!!! rcu didn't free\n"); +} + + + + +static int do_addrem_dlimit(uint32_t id, const char __user *name, + uint32_t flags, int add) +{ + struct path path; + int ret; + + ret = user_lpath(name, &path); + if (!ret) { + struct super_block *sb; + struct dl_info *dli; + + ret = -EINVAL; + if (!path.dentry->d_inode) + goto out_release; + if (!(sb = path.dentry->d_inode->i_sb)) + goto out_release; + + if (add) { + dli = __alloc_dl_info(sb, id); + spin_lock(&dl_info_hash_lock); + + ret = -EEXIST; + if (__lookup_dl_info(sb, id)) + goto out_unlock; + __hash_dl_info(dli); + dli = NULL; + } else { + spin_lock(&dl_info_hash_lock); + dli = __lookup_dl_info(sb, id); + + ret = -ESRCH; + if (!dli) + goto out_unlock; + __unhash_dl_info(dli); + } + ret = 0; + out_unlock: + spin_unlock(&dl_info_hash_lock); + if (add && dli) + __dealloc_dl_info(dli); + out_release: + path_put(&path); + } + return ret; +} + +int vc_add_dlimit(uint32_t id, void __user *data) +{ + struct vcmd_ctx_dlimit_base_v0 vc_data; + + if (copy_from_user(&vc_data, data, sizeof(vc_data))) + return -EFAULT; + + return do_addrem_dlimit(id, vc_data.name, vc_data.flags, 1); +} + +int vc_rem_dlimit(uint32_t id, void __user *data) +{ + struct vcmd_ctx_dlimit_base_v0 vc_data; + + if (copy_from_user(&vc_data, data, sizeof(vc_data))) + return -EFAULT; + + return do_addrem_dlimit(id, vc_data.name, vc_data.flags, 0); +} + +#ifdef CONFIG_COMPAT + +int vc_add_dlimit_x32(uint32_t id, void __user *data) +{ + struct vcmd_ctx_dlimit_base_v0_x32 vc_data; + + if (copy_from_user(&vc_data, data, sizeof(vc_data))) + return -EFAULT; + + return do_addrem_dlimit(id, + compat_ptr(vc_data.name_ptr), vc_data.flags, 1); +} + +int vc_rem_dlimit_x32(uint32_t id, void __user *data) +{ + struct vcmd_ctx_dlimit_base_v0_x32 vc_data; + + if (copy_from_user(&vc_data, data, sizeof(vc_data))) + return -EFAULT; + + return do_addrem_dlimit(id, + compat_ptr(vc_data.name_ptr), vc_data.flags, 0); +} + +#endif /* CONFIG_COMPAT */ + + +static inline +int do_set_dlimit(uint32_t id, const char __user *name, + uint32_t space_used, uint32_t space_total, + uint32_t inodes_used, uint32_t inodes_total, + uint32_t reserved, uint32_t flags) +{ + struct path path; + int ret; + + ret = user_lpath(name, &path); + if (!ret) { + struct super_block *sb; + struct dl_info *dli; + + ret = -EINVAL; + if (!path.dentry->d_inode) + goto out_release; + if (!(sb = 
path.dentry->d_inode->i_sb)) + goto out_release; + + /* sanity checks */ + if ((reserved != CDLIM_KEEP && + reserved > 100) || + (inodes_used != CDLIM_KEEP && + inodes_used > inodes_total) || + (space_used != CDLIM_KEEP && + space_used > space_total)) + goto out_release; + + ret = -ESRCH; + dli = locate_dl_info(sb, id); + if (!dli) + goto out_release; + + spin_lock(&dli->dl_lock); + + if (inodes_used != CDLIM_KEEP) + dli->dl_inodes_used = inodes_used; + if (inodes_total != CDLIM_KEEP) + dli->dl_inodes_total = inodes_total; + if (space_used != CDLIM_KEEP) + dli->dl_space_used = dlimit_space_32to64( + space_used, flags, DLIMS_USED); + + if (space_total == CDLIM_INFINITY) + dli->dl_space_total = DLIM_INFINITY; + else if (space_total != CDLIM_KEEP) + dli->dl_space_total = dlimit_space_32to64( + space_total, flags, DLIMS_TOTAL); + + if (reserved != CDLIM_KEEP) + dli->dl_nrlmult = (1 << 10) * (100 - reserved) / 100; + + spin_unlock(&dli->dl_lock); + + put_dl_info(dli); + ret = 0; + + out_release: + path_put(&path); + } + return ret; +} + +int vc_set_dlimit(uint32_t id, void __user *data) +{ + struct vcmd_ctx_dlimit_v0 vc_data; + + if (copy_from_user(&vc_data, data, sizeof(vc_data))) + return -EFAULT; + + return do_set_dlimit(id, vc_data.name, + vc_data.space_used, vc_data.space_total, + vc_data.inodes_used, vc_data.inodes_total, + vc_data.reserved, vc_data.flags); +} + +#ifdef CONFIG_COMPAT + +int vc_set_dlimit_x32(uint32_t id, void __user *data) +{ + struct vcmd_ctx_dlimit_v0_x32 vc_data; + + if (copy_from_user(&vc_data, data, sizeof(vc_data))) + return -EFAULT; + + return do_set_dlimit(id, compat_ptr(vc_data.name_ptr), + vc_data.space_used, vc_data.space_total, + vc_data.inodes_used, vc_data.inodes_total, + vc_data.reserved, vc_data.flags); +} + +#endif /* CONFIG_COMPAT */ + + +static inline +int do_get_dlimit(uint32_t id, const char __user *name, + uint32_t *space_used, uint32_t *space_total, + uint32_t *inodes_used, uint32_t *inodes_total, + uint32_t *reserved, uint32_t *flags) +{ + struct path path; + int ret; + + ret = user_lpath(name, &path); + if (!ret) { + struct super_block *sb; + struct dl_info *dli; + + ret = -EINVAL; + if (!path.dentry->d_inode) + goto out_release; + if (!(sb = path.dentry->d_inode->i_sb)) + goto out_release; + + ret = -ESRCH; + dli = locate_dl_info(sb, id); + if (!dli) + goto out_release; + + spin_lock(&dli->dl_lock); + *inodes_used = dli->dl_inodes_used; + *inodes_total = dli->dl_inodes_total; + + *space_used = dlimit_space_64to32( + dli->dl_space_used, flags, DLIMS_USED); + + if (dli->dl_space_total == DLIM_INFINITY) + *space_total = CDLIM_INFINITY; + else + *space_total = dlimit_space_64to32( + dli->dl_space_total, flags, DLIMS_TOTAL); + + *reserved = 100 - ((dli->dl_nrlmult * 100 + 512) >> 10); + spin_unlock(&dli->dl_lock); + + put_dl_info(dli); + ret = -EFAULT; + + ret = 0; + out_release: + path_put(&path); + } + return ret; +} + + +int vc_get_dlimit(uint32_t id, void __user *data) +{ + struct vcmd_ctx_dlimit_v0 vc_data; + int ret; + + if (copy_from_user(&vc_data, data, sizeof(vc_data))) + return -EFAULT; + + ret = do_get_dlimit(id, vc_data.name, + &vc_data.space_used, &vc_data.space_total, + &vc_data.inodes_used, &vc_data.inodes_total, + &vc_data.reserved, &vc_data.flags); + if (ret) + return ret; + + if (copy_to_user(data, &vc_data, sizeof(vc_data))) + return -EFAULT; + return 0; +} + +#ifdef CONFIG_COMPAT + +int vc_get_dlimit_x32(uint32_t id, void __user *data) +{ + struct vcmd_ctx_dlimit_v0_x32 vc_data; + int ret; + + if (copy_from_user(&vc_data, data, 
sizeof(vc_data))) + return -EFAULT; + + ret = do_get_dlimit(id, compat_ptr(vc_data.name_ptr), + &vc_data.space_used, &vc_data.space_total, + &vc_data.inodes_used, &vc_data.inodes_total, + &vc_data.reserved, &vc_data.flags); + if (ret) + return ret; + + if (copy_to_user(data, &vc_data, sizeof(vc_data))) + return -EFAULT; + return 0; +} + +#endif /* CONFIG_COMPAT */ + + +void vx_vsi_statfs(struct super_block *sb, struct kstatfs *buf) +{ + struct dl_info *dli; + __u64 blimit, bfree, bavail; + __u32 ifree; + + dli = locate_dl_info(sb, dx_current_tag()); + if (!dli) + return; + + spin_lock(&dli->dl_lock); + if (dli->dl_inodes_total == (unsigned long)DLIM_INFINITY) + goto no_ilim; + + /* reduce max inodes available to limit */ + if (buf->f_files > dli->dl_inodes_total) + buf->f_files = dli->dl_inodes_total; + + ifree = dli->dl_inodes_total - dli->dl_inodes_used; + /* reduce free inodes to min */ + if (ifree < buf->f_ffree) + buf->f_ffree = ifree; + +no_ilim: + if (dli->dl_space_total == DLIM_INFINITY) + goto no_blim; + + blimit = dli->dl_space_total >> sb->s_blocksize_bits; + + if (dli->dl_space_total < dli->dl_space_used) + bfree = 0; + else + bfree = (dli->dl_space_total - dli->dl_space_used) + >> sb->s_blocksize_bits; + + bavail = ((dli->dl_space_total >> 10) * dli->dl_nrlmult); + if (bavail < dli->dl_space_used) + bavail = 0; + else + bavail = (bavail - dli->dl_space_used) + >> sb->s_blocksize_bits; + + /* reduce max space available to limit */ + if (buf->f_blocks > blimit) + buf->f_blocks = blimit; + + /* reduce free space to min */ + if (bfree < buf->f_bfree) + buf->f_bfree = bfree; + + /* reduce avail space to min */ + if (bavail < buf->f_bavail) + buf->f_bavail = bavail; + +no_blim: + spin_unlock(&dli->dl_lock); + put_dl_info(dli); + + return; +} + +#include + +EXPORT_SYMBOL_GPL(locate_dl_info); +EXPORT_SYMBOL_GPL(rcu_free_dl_info); + diff -Nur kernel-2.6.32.54/linux-2.6.32/kernel/vserver/helper.c kernel-2.6.32.54.vs/linux-2.6.32/kernel/vserver/helper.c --- kernel-2.6.32.54/linux-2.6.32/kernel/vserver/helper.c 1970-01-01 01:00:00.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/kernel/vserver/helper.c 2012-01-16 14:51:22.045408253 +0100 @@ -0,0 +1,223 @@ +/* + * linux/kernel/vserver/helper.c + * + * Virtual Context Support + * + * Copyright (C) 2004-2007 Herbert Pötzl + * + * V0.01 basic helper + * + */ + +#include +#include +#include +#include +#include + + +char vshelper_path[255] = "/sbin/vshelper"; + + +static int do_vshelper(char *name, char *argv[], char *envp[], int sync) +{ + int ret; + + if ((ret = call_usermodehelper(name, argv, envp, sync))) { + printk( KERN_WARNING + "%s: (%s %s) returned %s with %d\n", + name, argv[1], argv[2], + sync ? "sync" : "async", ret); + } + vxdprintk(VXD_CBIT(switch, 4), + "%s: (%s %s) returned %s with %d", + name, argv[1], argv[2], sync ? "sync" : "async", ret); + return ret; +} + +/* + * vshelper path is set via /proc/sys + * invoked by vserver sys_reboot(), with + * the following arguments + * + * argv [0] = vshelper_path; + * argv [1] = action: "restart", "halt", "poweroff", ... 
+ * argv [2] = context identifier + * + * envp [*] = type-specific parameters + */ + +long vs_reboot_helper(struct vx_info *vxi, int cmd, void __user *arg) +{ + char id_buf[8], cmd_buf[16]; + char uid_buf[16], pid_buf[16]; + int ret; + + char *argv[] = {vshelper_path, NULL, id_buf, 0}; + char *envp[] = {"HOME=/", "TERM=linux", + "PATH=/sbin:/usr/sbin:/bin:/usr/bin", + uid_buf, pid_buf, cmd_buf, 0}; + + if (vx_info_state(vxi, VXS_HELPER)) + return -EAGAIN; + vxi->vx_state |= VXS_HELPER; + + snprintf(id_buf, sizeof(id_buf)-1, "%d", vxi->vx_id); + + snprintf(cmd_buf, sizeof(cmd_buf)-1, "VS_CMD=%08x", cmd); + snprintf(uid_buf, sizeof(uid_buf)-1, "VS_UID=%d", current_uid()); + snprintf(pid_buf, sizeof(pid_buf)-1, "VS_PID=%d", current->pid); + + switch (cmd) { + case LINUX_REBOOT_CMD_RESTART: + argv[1] = "restart"; + break; + + case LINUX_REBOOT_CMD_HALT: + argv[1] = "halt"; + break; + + case LINUX_REBOOT_CMD_POWER_OFF: + argv[1] = "poweroff"; + break; + + case LINUX_REBOOT_CMD_SW_SUSPEND: + argv[1] = "swsusp"; + break; + + case LINUX_REBOOT_CMD_OOM: + argv[1] = "oom"; + break; + + default: + vxi->vx_state &= ~VXS_HELPER; + return 0; + } + + ret = do_vshelper(vshelper_path, argv, envp, 0); + vxi->vx_state &= ~VXS_HELPER; + __wakeup_vx_info(vxi); + return (ret) ? -EPERM : 0; +} + + +long vs_reboot(unsigned int cmd, void __user *arg) +{ + struct vx_info *vxi = current_vx_info(); + long ret = 0; + + vxdprintk(VXD_CBIT(misc, 5), + "vs_reboot(%p[#%d],%u)", + vxi, vxi ? vxi->vx_id : 0, cmd); + + ret = vs_reboot_helper(vxi, cmd, arg); + if (ret) + return ret; + + vxi->reboot_cmd = cmd; + if (vx_info_flags(vxi, VXF_REBOOT_KILL, 0)) { + switch (cmd) { + case LINUX_REBOOT_CMD_RESTART: + case LINUX_REBOOT_CMD_HALT: + case LINUX_REBOOT_CMD_POWER_OFF: + vx_info_kill(vxi, 0, SIGKILL); + vx_info_kill(vxi, 1, SIGKILL); + default: + break; + } + } + return 0; +} + +long vs_oom_action(unsigned int cmd) +{ + struct vx_info *vxi = current_vx_info(); + long ret = 0; + + vxdprintk(VXD_CBIT(misc, 5), + "vs_oom_action(%p[#%d],%u)", + vxi, vxi ? 
vxi->vx_id : 0, cmd); + + ret = vs_reboot_helper(vxi, cmd, NULL); + if (ret) + return ret; + + vxi->reboot_cmd = cmd; + if (vx_info_flags(vxi, VXF_REBOOT_KILL, 0)) { + vx_info_kill(vxi, 0, SIGKILL); + vx_info_kill(vxi, 1, SIGKILL); + } + return 0; +} + +/* + * argv [0] = vshelper_path; + * argv [1] = action: "startup", "shutdown" + * argv [2] = context identifier + * + * envp [*] = type-specific parameters + */ + +long vs_state_change(struct vx_info *vxi, unsigned int cmd) +{ + char id_buf[8], cmd_buf[16]; + char *argv[] = {vshelper_path, NULL, id_buf, 0}; + char *envp[] = {"HOME=/", "TERM=linux", + "PATH=/sbin:/usr/sbin:/bin:/usr/bin", cmd_buf, 0}; + + if (!vx_info_flags(vxi, VXF_SC_HELPER, 0)) + return 0; + + snprintf(id_buf, sizeof(id_buf)-1, "%d", vxi->vx_id); + snprintf(cmd_buf, sizeof(cmd_buf)-1, "VS_CMD=%08x", cmd); + + switch (cmd) { + case VSC_STARTUP: + argv[1] = "startup"; + break; + case VSC_SHUTDOWN: + argv[1] = "shutdown"; + break; + default: + return 0; + } + + return do_vshelper(vshelper_path, argv, envp, 1); +} + + +/* + * argv [0] = vshelper_path; + * argv [1] = action: "netup", "netdown" + * argv [2] = context identifier + * + * envp [*] = type-specific parameters + */ + +long vs_net_change(struct nx_info *nxi, unsigned int cmd) +{ + char id_buf[8], cmd_buf[16]; + char *argv[] = {vshelper_path, NULL, id_buf, 0}; + char *envp[] = {"HOME=/", "TERM=linux", + "PATH=/sbin:/usr/sbin:/bin:/usr/bin", cmd_buf, 0}; + + if (!nx_info_flags(nxi, NXF_SC_HELPER, 0)) + return 0; + + snprintf(id_buf, sizeof(id_buf)-1, "%d", nxi->nx_id); + snprintf(cmd_buf, sizeof(cmd_buf)-1, "VS_CMD=%08x", cmd); + + switch (cmd) { + case VSC_NETUP: + argv[1] = "netup"; + break; + case VSC_NETDOWN: + argv[1] = "netdown"; + break; + default: + return 0; + } + + return do_vshelper(vshelper_path, argv, envp, 1); +} + diff -Nur kernel-2.6.32.54/linux-2.6.32/kernel/vserver/history.c kernel-2.6.32.54.vs/linux-2.6.32/kernel/vserver/history.c --- kernel-2.6.32.54/linux-2.6.32/kernel/vserver/history.c 1970-01-01 01:00:00.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/kernel/vserver/history.c 2012-01-16 14:51:22.045408253 +0100 @@ -0,0 +1,258 @@ +/* + * kernel/vserver/history.c + * + * Virtual Context History Backtrace + * + * Copyright (C) 2004-2007 Herbert Pötzl + * + * V0.01 basic structure + * V0.02 hash/unhash and trace + * V0.03 preemption fixes + * + */ + +#include +#include + +#include +#include +#include +#include + + +#ifdef CONFIG_VSERVER_HISTORY +#define VXH_SIZE CONFIG_VSERVER_HISTORY_SIZE +#else +#define VXH_SIZE 64 +#endif + +struct _vx_history { + unsigned int counter; + + struct _vx_hist_entry entry[VXH_SIZE + 1]; +}; + + +DEFINE_PER_CPU(struct _vx_history, vx_history_buffer); + +unsigned volatile int vxh_active = 1; + +static atomic_t sequence = ATOMIC_INIT(0); + + +/* vxh_advance() + + * requires disabled preemption */ + +struct _vx_hist_entry *vxh_advance(void *loc) +{ + unsigned int cpu = smp_processor_id(); + struct _vx_history *hist = &per_cpu(vx_history_buffer, cpu); + struct _vx_hist_entry *entry; + unsigned int index; + + index = vxh_active ? (hist->counter++ % VXH_SIZE) : VXH_SIZE; + entry = &hist->entry[index]; + + entry->seq = atomic_inc_return(&sequence); + entry->loc = loc; + return entry; +} + +EXPORT_SYMBOL_GPL(vxh_advance); + + +#define VXH_LOC_FMTS "(#%04x,*%d):%p" + +#define VXH_LOC_ARGS(e) (e)->seq, cpu, (e)->loc + + +#define VXH_VXI_FMTS "%p[#%d,%d.%d]" + +#define VXH_VXI_ARGS(e) (e)->vxi.ptr, \ + (e)->vxi.ptr ? (e)->vxi.xid : 0, \ + (e)->vxi.ptr ? 
(e)->vxi.usecnt : 0, \ + (e)->vxi.ptr ? (e)->vxi.tasks : 0 + +void vxh_dump_entry(struct _vx_hist_entry *e, unsigned cpu) +{ + switch (e->type) { + case VXH_THROW_OOPS: + printk( VXH_LOC_FMTS " oops \n", VXH_LOC_ARGS(e)); + break; + + case VXH_GET_VX_INFO: + case VXH_PUT_VX_INFO: + printk( VXH_LOC_FMTS " %s_vx_info " VXH_VXI_FMTS "\n", + VXH_LOC_ARGS(e), + (e->type == VXH_GET_VX_INFO) ? "get" : "put", + VXH_VXI_ARGS(e)); + break; + + case VXH_INIT_VX_INFO: + case VXH_SET_VX_INFO: + case VXH_CLR_VX_INFO: + printk( VXH_LOC_FMTS " %s_vx_info " VXH_VXI_FMTS " @%p\n", + VXH_LOC_ARGS(e), + (e->type == VXH_INIT_VX_INFO) ? "init" : + ((e->type == VXH_SET_VX_INFO) ? "set" : "clr"), + VXH_VXI_ARGS(e), e->sc.data); + break; + + case VXH_CLAIM_VX_INFO: + case VXH_RELEASE_VX_INFO: + printk( VXH_LOC_FMTS " %s_vx_info " VXH_VXI_FMTS " @%p\n", + VXH_LOC_ARGS(e), + (e->type == VXH_CLAIM_VX_INFO) ? "claim" : "release", + VXH_VXI_ARGS(e), e->sc.data); + break; + + case VXH_ALLOC_VX_INFO: + case VXH_DEALLOC_VX_INFO: + printk( VXH_LOC_FMTS " %s_vx_info " VXH_VXI_FMTS "\n", + VXH_LOC_ARGS(e), + (e->type == VXH_ALLOC_VX_INFO) ? "alloc" : "dealloc", + VXH_VXI_ARGS(e)); + break; + + case VXH_HASH_VX_INFO: + case VXH_UNHASH_VX_INFO: + printk( VXH_LOC_FMTS " __%s_vx_info " VXH_VXI_FMTS "\n", + VXH_LOC_ARGS(e), + (e->type == VXH_HASH_VX_INFO) ? "hash" : "unhash", + VXH_VXI_ARGS(e)); + break; + + case VXH_LOC_VX_INFO: + case VXH_LOOKUP_VX_INFO: + case VXH_CREATE_VX_INFO: + printk( VXH_LOC_FMTS " __%s_vx_info [#%d] -> " VXH_VXI_FMTS "\n", + VXH_LOC_ARGS(e), + (e->type == VXH_CREATE_VX_INFO) ? "create" : + ((e->type == VXH_LOC_VX_INFO) ? "loc" : "lookup"), + e->ll.arg, VXH_VXI_ARGS(e)); + break; + } +} + +static void __vxh_dump_history(void) +{ + unsigned int i, cpu; + + printk("History:\tSEQ: %8x\tNR_CPUS: %d\n", + atomic_read(&sequence), NR_CPUS); + + for (i = 0; i < VXH_SIZE; i++) { + for_each_online_cpu(cpu) { + struct _vx_history *hist = + &per_cpu(vx_history_buffer, cpu); + unsigned int index = (hist->counter - i) % VXH_SIZE; + struct _vx_hist_entry *entry = &hist->entry[index]; + + vxh_dump_entry(entry, cpu); + } + } +} + +void vxh_dump_history(void) +{ + vxh_active = 0; +#ifdef CONFIG_SMP + local_irq_enable(); + smp_send_stop(); + local_irq_disable(); +#endif + __vxh_dump_history(); +} + + +/* vserver syscall commands below here */ + + +int vc_dump_history(uint32_t id) +{ + vxh_active = 0; + __vxh_dump_history(); + vxh_active = 1; + + return 0; +} + + +int do_read_history(struct __user _vx_hist_entry *data, + int cpu, uint32_t *index, uint32_t *count) +{ + int pos, ret = 0; + struct _vx_history *hist = &per_cpu(vx_history_buffer, cpu); + int end = hist->counter; + int start = end - VXH_SIZE + 2; + int idx = *index; + + /* special case: get current pos */ + if (!*count) { + *index = end; + return 0; + } + + /* have we lost some data? */ + if (idx < start) + idx = start; + + for (pos = 0; (pos < *count) && (idx < end); pos++, idx++) { + struct _vx_hist_entry *entry = + &hist->entry[idx % VXH_SIZE]; + + /* send entry to userspace */ + ret = copy_to_user(&data[pos], entry, sizeof(*entry)); + if (ret) + break; + } + /* save new index and count */ + *index = idx; + *count = pos; + return ret ? 
ret : (*index < end); +} + +int vc_read_history(uint32_t id, void __user *data) +{ + struct vcmd_read_history_v0 vc_data; + int ret; + + if (id >= NR_CPUS) + return -EINVAL; + + if (copy_from_user(&vc_data, data, sizeof(vc_data))) + return -EFAULT; + + ret = do_read_history((struct __user _vx_hist_entry *)vc_data.data, + id, &vc_data.index, &vc_data.count); + + if (copy_to_user(data, &vc_data, sizeof(vc_data))) + return -EFAULT; + return ret; +} + +#ifdef CONFIG_COMPAT + +int vc_read_history_x32(uint32_t id, void __user *data) +{ + struct vcmd_read_history_v0_x32 vc_data; + int ret; + + if (id >= NR_CPUS) + return -EINVAL; + + if (copy_from_user(&vc_data, data, sizeof(vc_data))) + return -EFAULT; + + ret = do_read_history((struct __user _vx_hist_entry *) + compat_ptr(vc_data.data_ptr), + id, &vc_data.index, &vc_data.count); + + if (copy_to_user(data, &vc_data, sizeof(vc_data))) + return -EFAULT; + return ret; +} + +#endif /* CONFIG_COMPAT */ + diff -Nur kernel-2.6.32.54/linux-2.6.32/kernel/vserver/inet.c kernel-2.6.32.54.vs/linux-2.6.32/kernel/vserver/inet.c --- kernel-2.6.32.54/linux-2.6.32/kernel/vserver/inet.c 1970-01-01 01:00:00.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/kernel/vserver/inet.c 2012-01-16 14:51:22.045408253 +0100 @@ -0,0 +1,225 @@ + +#include +#include +#include +#include +#include +#include +#include + + +int nx_v4_addr_conflict(struct nx_info *nxi1, struct nx_info *nxi2) +{ + int ret = 0; + + if (!nxi1 || !nxi2 || nxi1 == nxi2) + ret = 1; + else { + struct nx_addr_v4 *ptr; + + for (ptr = &nxi1->v4; ptr; ptr = ptr->next) { + if (v4_nx_addr_in_nx_info(nxi2, ptr, -1)) { + ret = 1; + break; + } + } + } + + vxdprintk(VXD_CBIT(net, 2), + "nx_v4_addr_conflict(%p,%p): %d", + nxi1, nxi2, ret); + + return ret; +} + + +#ifdef CONFIG_IPV6 + +int nx_v6_addr_conflict(struct nx_info *nxi1, struct nx_info *nxi2) +{ + int ret = 0; + + if (!nxi1 || !nxi2 || nxi1 == nxi2) + ret = 1; + else { + struct nx_addr_v6 *ptr; + + for (ptr = &nxi1->v6; ptr; ptr = ptr->next) { + if (v6_nx_addr_in_nx_info(nxi2, ptr, -1)) { + ret = 1; + break; + } + } + } + + vxdprintk(VXD_CBIT(net, 2), + "nx_v6_addr_conflict(%p,%p): %d", + nxi1, nxi2, ret); + + return ret; +} + +#endif + +int v4_dev_in_nx_info(struct net_device *dev, struct nx_info *nxi) +{ + struct in_device *in_dev; + struct in_ifaddr **ifap; + struct in_ifaddr *ifa; + int ret = 0; + + if (!dev) + goto out; + in_dev = in_dev_get(dev); + if (!in_dev) + goto out; + + for (ifap = &in_dev->ifa_list; (ifa = *ifap) != NULL; + ifap = &ifa->ifa_next) { + if (v4_addr_in_nx_info(nxi, ifa->ifa_local, NXA_MASK_SHOW)) { + ret = 1; + break; + } + } + in_dev_put(in_dev); +out: + return ret; +} + + +#ifdef CONFIG_IPV6 + +int v6_dev_in_nx_info(struct net_device *dev, struct nx_info *nxi) +{ + struct inet6_dev *in_dev; + struct inet6_ifaddr **ifap; + struct inet6_ifaddr *ifa; + int ret = 0; + + if (!dev) + goto out; + in_dev = in6_dev_get(dev); + if (!in_dev) + goto out; + + for (ifap = &in_dev->addr_list; (ifa = *ifap) != NULL; + ifap = &ifa->if_next) { + if (v6_addr_in_nx_info(nxi, &ifa->addr, -1)) { + ret = 1; + break; + } + } + in6_dev_put(in_dev); +out: + return ret; +} + +#endif + +int dev_in_nx_info(struct net_device *dev, struct nx_info *nxi) +{ + int ret = 1; + + if (!nxi) + goto out; + if (nxi->v4.type && v4_dev_in_nx_info(dev, nxi)) + goto out; +#ifdef CONFIG_IPV6 + ret = 2; + if (nxi->v6.type && v6_dev_in_nx_info(dev, nxi)) + goto out; +#endif + ret = 0; +out: + vxdprintk(VXD_CBIT(net, 3), + "dev_in_nx_info(%p,%p[#%d]) = %d", + dev, nxi, nxi ? 
nxi->nx_id : 0, ret); + return ret; +} + +int ip_v4_find_src(struct net *net, struct nx_info *nxi, + struct rtable **rp, struct flowi *fl) +{ + if (!nxi) + return 0; + + /* FIXME: handle lback only case */ + if (!NX_IPV4(nxi)) + return -EPERM; + + vxdprintk(VXD_CBIT(net, 4), + "ip_v4_find_src(%p[#%u]) " NIPQUAD_FMT " -> " NIPQUAD_FMT, + nxi, nxi ? nxi->nx_id : 0, + NIPQUAD(fl->fl4_src), NIPQUAD(fl->fl4_dst)); + + /* single IP is unconditional */ + if (nx_info_flags(nxi, NXF_SINGLE_IP, 0) && + (fl->fl4_src == INADDR_ANY)) + fl->fl4_src = nxi->v4.ip[0].s_addr; + + if (fl->fl4_src == INADDR_ANY) { + struct nx_addr_v4 *ptr; + __be32 found = 0; + int err; + + err = __ip_route_output_key(net, rp, fl); + if (!err) { + found = (*rp)->rt_src; + ip_rt_put(*rp); + vxdprintk(VXD_CBIT(net, 4), + "ip_v4_find_src(%p[#%u]) rok[%u]: " NIPQUAD_FMT, + nxi, nxi ? nxi->nx_id : 0, fl->oif, NIPQUAD(found)); + if (v4_addr_in_nx_info(nxi, found, NXA_MASK_BIND)) + goto found; + } + + for (ptr = &nxi->v4; ptr; ptr = ptr->next) { + __be32 primary = ptr->ip[0].s_addr; + __be32 mask = ptr->mask.s_addr; + __be32 neta = primary & mask; + + vxdprintk(VXD_CBIT(net, 4), "ip_v4_find_src(%p[#%u]) chk: " + NIPQUAD_FMT "/" NIPQUAD_FMT "/" NIPQUAD_FMT, + nxi, nxi ? nxi->nx_id : 0, NIPQUAD(primary), + NIPQUAD(mask), NIPQUAD(neta)); + if ((found & mask) != neta) + continue; + + fl->fl4_src = primary; + err = __ip_route_output_key(net, rp, fl); + vxdprintk(VXD_CBIT(net, 4), + "ip_v4_find_src(%p[#%u]) rok[%u]: " NIPQUAD_FMT, + nxi, nxi ? nxi->nx_id : 0, fl->oif, NIPQUAD(primary)); + if (!err) { + found = (*rp)->rt_src; + ip_rt_put(*rp); + if (found == primary) + goto found; + } + } + /* still no source ip? */ + found = ipv4_is_loopback(fl->fl4_dst) + ? IPI_LOOPBACK : nxi->v4.ip[0].s_addr; + found: + /* assign src ip to flow */ + fl->fl4_src = found; + + } else { + if (!v4_addr_in_nx_info(nxi, fl->fl4_src, NXA_MASK_BIND)) + return -EPERM; + } + + if (nx_info_flags(nxi, NXF_LBACK_REMAP, 0)) { + if (ipv4_is_loopback(fl->fl4_dst)) + fl->fl4_dst = nxi->v4_lback.s_addr; + if (ipv4_is_loopback(fl->fl4_src)) + fl->fl4_src = nxi->v4_lback.s_addr; + } else if (ipv4_is_loopback(fl->fl4_dst) && + !nx_info_flags(nxi, NXF_LBACK_ALLOW, 0)) + return -EPERM; + + return 0; +} + +EXPORT_SYMBOL_GPL(ip_v4_find_src); + diff -Nur kernel-2.6.32.54/linux-2.6.32/kernel/vserver/init.c kernel-2.6.32.54.vs/linux-2.6.32/kernel/vserver/init.c --- kernel-2.6.32.54/linux-2.6.32/kernel/vserver/init.c 1970-01-01 01:00:00.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/kernel/vserver/init.c 2012-01-16 14:51:22.045408253 +0100 @@ -0,0 +1,45 @@ +/* + * linux/kernel/init.c + * + * Virtual Server Init + * + * Copyright (C) 2004-2007 Herbert Pötzl + * + * V0.01 basic structure + * + */ + +#include + +int vserver_register_sysctl(void); +void vserver_unregister_sysctl(void); + + +static int __init init_vserver(void) +{ + int ret = 0; + +#ifdef CONFIG_VSERVER_DEBUG + vserver_register_sysctl(); +#endif + return ret; +} + + +static void __exit exit_vserver(void) +{ + +#ifdef CONFIG_VSERVER_DEBUG + vserver_unregister_sysctl(); +#endif + return; +} + +/* FIXME: GFP_ZONETYPES gone +long vx_slab[GFP_ZONETYPES]; */ +long vx_area; + + +module_init(init_vserver); +module_exit(exit_vserver); + diff -Nur kernel-2.6.32.54/linux-2.6.32/kernel/vserver/inode.c kernel-2.6.32.54.vs/linux-2.6.32/kernel/vserver/inode.c --- kernel-2.6.32.54/linux-2.6.32/kernel/vserver/inode.c 1970-01-01 01:00:00.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/kernel/vserver/inode.c 2012-01-16 
14:51:22.045408253 +0100 @@ -0,0 +1,433 @@ +/* + * linux/kernel/vserver/inode.c + * + * Virtual Server: File System Support + * + * Copyright (C) 2004-2007 Herbert Pötzl + * + * V0.01 separated from vcontext V0.05 + * V0.02 moved to tag (instead of xid) + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + + +static int __vc_get_iattr(struct inode *in, uint32_t *tag, uint32_t *flags, uint32_t *mask) +{ + struct proc_dir_entry *entry; + + if (!in || !in->i_sb) + return -ESRCH; + + *flags = IATTR_TAG + | (IS_IMMUTABLE(in) ? IATTR_IMMUTABLE : 0) + | (IS_IXUNLINK(in) ? IATTR_IXUNLINK : 0) + | (IS_BARRIER(in) ? IATTR_BARRIER : 0) + | (IS_COW(in) ? IATTR_COW : 0); + *mask = IATTR_IXUNLINK | IATTR_IMMUTABLE | IATTR_COW; + + if (S_ISDIR(in->i_mode)) + *mask |= IATTR_BARRIER; + + if (IS_TAGGED(in)) { + *tag = in->i_tag; + *mask |= IATTR_TAG; + } + + switch (in->i_sb->s_magic) { + case PROC_SUPER_MAGIC: + entry = PROC_I(in)->pde; + + /* check for specific inodes? */ + if (entry) + *mask |= IATTR_FLAGS; + if (entry) + *flags |= (entry->vx_flags & IATTR_FLAGS); + else + *flags |= (PROC_I(in)->vx_flags & IATTR_FLAGS); + break; + + case DEVPTS_SUPER_MAGIC: + *tag = in->i_tag; + *mask |= IATTR_TAG; + break; + + default: + break; + } + return 0; +} + +int vc_get_iattr(void __user *data) +{ + struct path path; + struct vcmd_ctx_iattr_v1 vc_data = { .tag = -1 }; + int ret; + + if (copy_from_user(&vc_data, data, sizeof(vc_data))) + return -EFAULT; + + ret = user_lpath(vc_data.name, &path); + if (!ret) { + ret = __vc_get_iattr(path.dentry->d_inode, + &vc_data.tag, &vc_data.flags, &vc_data.mask); + path_put(&path); + } + if (ret) + return ret; + + if (copy_to_user(data, &vc_data, sizeof(vc_data))) + ret = -EFAULT; + return ret; +} + +#ifdef CONFIG_COMPAT + +int vc_get_iattr_x32(void __user *data) +{ + struct path path; + struct vcmd_ctx_iattr_v1_x32 vc_data = { .tag = -1 }; + int ret; + + if (copy_from_user(&vc_data, data, sizeof(vc_data))) + return -EFAULT; + + ret = user_lpath(compat_ptr(vc_data.name_ptr), &path); + if (!ret) { + ret = __vc_get_iattr(path.dentry->d_inode, + &vc_data.tag, &vc_data.flags, &vc_data.mask); + path_put(&path); + } + if (ret) + return ret; + + if (copy_to_user(data, &vc_data, sizeof(vc_data))) + ret = -EFAULT; + return ret; +} + +#endif /* CONFIG_COMPAT */ + + +int vc_fget_iattr(uint32_t fd, void __user *data) +{ + struct file *filp; + struct vcmd_ctx_fiattr_v0 vc_data = { .tag = -1 }; + int ret; + + if (copy_from_user(&vc_data, data, sizeof(vc_data))) + return -EFAULT; + + filp = fget(fd); + if (!filp || !filp->f_dentry || !filp->f_dentry->d_inode) + return -EBADF; + + ret = __vc_get_iattr(filp->f_dentry->d_inode, + &vc_data.tag, &vc_data.flags, &vc_data.mask); + + fput(filp); + + if (copy_to_user(data, &vc_data, sizeof(vc_data))) + ret = -EFAULT; + return ret; +} + + +static int __vc_set_iattr(struct dentry *de, uint32_t *tag, uint32_t *flags, uint32_t *mask) +{ + struct inode *in = de->d_inode; + int error = 0, is_proc = 0, has_tag = 0; + struct iattr attr = { 0 }; + + if (!in || !in->i_sb) + return -ESRCH; + + is_proc = (in->i_sb->s_magic == PROC_SUPER_MAGIC); + if ((*mask & IATTR_FLAGS) && !is_proc) + return -EINVAL; + + has_tag = IS_TAGGED(in) || + (in->i_sb->s_magic == DEVPTS_SUPER_MAGIC); + if ((*mask & IATTR_TAG) && !has_tag) + return -EINVAL; + + mutex_lock(&in->i_mutex); + if (*mask & IATTR_TAG) { + attr.ia_tag = *tag; + attr.ia_valid |= ATTR_TAG; + } + + if (*mask & IATTR_FLAGS) { 
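+		/* only reached for procfs inodes (is_proc was checked above): merge the requested IATTR_FLAGS bits into the proc inode and, if one exists, its proc_dir_entry */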
+ struct proc_dir_entry *entry = PROC_I(in)->pde; + unsigned int iflags = PROC_I(in)->vx_flags; + + iflags = (iflags & ~(*mask & IATTR_FLAGS)) + | (*flags & IATTR_FLAGS); + PROC_I(in)->vx_flags = iflags; + if (entry) + entry->vx_flags = iflags; + } + + if (*mask & (IATTR_IMMUTABLE | IATTR_IXUNLINK | + IATTR_BARRIER | IATTR_COW)) { + int iflags = in->i_flags; + int vflags = in->i_vflags; + + if (*mask & IATTR_IMMUTABLE) { + if (*flags & IATTR_IMMUTABLE) + iflags |= S_IMMUTABLE; + else + iflags &= ~S_IMMUTABLE; + } + if (*mask & IATTR_IXUNLINK) { + if (*flags & IATTR_IXUNLINK) + iflags |= S_IXUNLINK; + else + iflags &= ~S_IXUNLINK; + } + if (S_ISDIR(in->i_mode) && (*mask & IATTR_BARRIER)) { + if (*flags & IATTR_BARRIER) + vflags |= V_BARRIER; + else + vflags &= ~V_BARRIER; + } + if (S_ISREG(in->i_mode) && (*mask & IATTR_COW)) { + if (*flags & IATTR_COW) + vflags |= V_COW; + else + vflags &= ~V_COW; + } + if (in->i_op && in->i_op->sync_flags) { + error = in->i_op->sync_flags(in, iflags, vflags); + if (error) + goto out; + } + } + + if (attr.ia_valid) { + if (in->i_op && in->i_op->setattr) + error = in->i_op->setattr(de, &attr); + else { + error = inode_change_ok(in, &attr); + if (!error) + error = inode_setattr(in, &attr); + } + } + +out: + mutex_unlock(&in->i_mutex); + return error; +} + +int vc_set_iattr(void __user *data) +{ + struct path path; + struct vcmd_ctx_iattr_v1 vc_data; + int ret; + + if (!capable(CAP_LINUX_IMMUTABLE)) + return -EPERM; + if (copy_from_user(&vc_data, data, sizeof(vc_data))) + return -EFAULT; + + ret = user_lpath(vc_data.name, &path); + if (!ret) { + ret = __vc_set_iattr(path.dentry, + &vc_data.tag, &vc_data.flags, &vc_data.mask); + path_put(&path); + } + + if (copy_to_user(data, &vc_data, sizeof(vc_data))) + ret = -EFAULT; + return ret; +} + +#ifdef CONFIG_COMPAT + +int vc_set_iattr_x32(void __user *data) +{ + struct path path; + struct vcmd_ctx_iattr_v1_x32 vc_data; + int ret; + + if (!capable(CAP_LINUX_IMMUTABLE)) + return -EPERM; + if (copy_from_user(&vc_data, data, sizeof(vc_data))) + return -EFAULT; + + ret = user_lpath(compat_ptr(vc_data.name_ptr), &path); + if (!ret) { + ret = __vc_set_iattr(path.dentry, + &vc_data.tag, &vc_data.flags, &vc_data.mask); + path_put(&path); + } + + if (copy_to_user(data, &vc_data, sizeof(vc_data))) + ret = -EFAULT; + return ret; +} + +#endif /* CONFIG_COMPAT */ + +int vc_fset_iattr(uint32_t fd, void __user *data) +{ + struct file *filp; + struct vcmd_ctx_fiattr_v0 vc_data; + int ret; + + if (!capable(CAP_LINUX_IMMUTABLE)) + return -EPERM; + if (copy_from_user(&vc_data, data, sizeof(vc_data))) + return -EFAULT; + + filp = fget(fd); + if (!filp || !filp->f_dentry || !filp->f_dentry->d_inode) + return -EBADF; + + ret = __vc_set_iattr(filp->f_dentry, &vc_data.tag, + &vc_data.flags, &vc_data.mask); + + fput(filp); + + if (copy_to_user(data, &vc_data, sizeof(vc_data))) + return -EFAULT; + return ret; +} + + +enum { Opt_notagcheck, Opt_tag, Opt_notag, Opt_tagid, Opt_err }; + +static match_table_t tokens = { + {Opt_notagcheck, "notagcheck"}, +#ifdef CONFIG_PROPAGATE + {Opt_notag, "notag"}, + {Opt_tag, "tag"}, + {Opt_tagid, "tagid=%u"}, +#endif + {Opt_err, NULL} +}; + + +static void __dx_parse_remove(char *string, char *opt) +{ + char *p = strstr(string, opt); + char *q = p; + + if (p) { + while (*q != '\0' && *q != ',') + q++; + while (*q) + *p++ = *q++; + while (*p) + *p++ = '\0'; + } +} + +int dx_parse_tag(char *string, tag_t *tag, int remove, int *mnt_flags, + unsigned long *flags) +{ + int set = 0; + substring_t args[MAX_OPT_ARGS]; 
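+	/* args[] is filled by match_token() below and read back through match_int() when the "tagid=%u" option is seen */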
+ int token, option = 0; + char *s, *p, *opts; + + if (!string) + return 0; + s = kstrdup(string, GFP_KERNEL | GFP_ATOMIC); + if (!s) + return 0; + + opts = s; + while ((p = strsep(&opts, ",")) != NULL) { + token = match_token(p, tokens, args); + + vxdprintk(VXD_CBIT(tag, 7), + "dx_parse_tag(»%s«): %d:#%d", + p, token, option); + + switch (token) { +#ifdef CONFIG_PROPAGATE + case Opt_tag: + if (tag) + *tag = 0; + if (remove) + __dx_parse_remove(s, "tag"); + *mnt_flags |= MNT_TAGID; + set |= MNT_TAGID; + break; + case Opt_notag: + if (remove) + __dx_parse_remove(s, "notag"); + *mnt_flags |= MNT_NOTAG; + set |= MNT_NOTAG; + break; + case Opt_tagid: + if (tag && !match_int(args, &option)) + *tag = option; + if (remove) + __dx_parse_remove(s, "tagid"); + *mnt_flags |= MNT_TAGID; + set |= MNT_TAGID; + break; +#endif + case Opt_notagcheck: + if (remove) + __dx_parse_remove(s, "notagcheck"); + *flags |= MS_NOTAGCHECK; + set |= MS_NOTAGCHECK; + break; + } + } + if (set) + strcpy(string, s); + kfree(s); + return set; +} + +#ifdef CONFIG_PROPAGATE + +void __dx_propagate_tag(struct nameidata *nd, struct inode *inode) +{ + tag_t new_tag = 0; + struct vfsmount *mnt; + int propagate; + + if (!nd) + return; + mnt = nd->path.mnt; + if (!mnt) + return; + + propagate = (mnt->mnt_flags & MNT_TAGID); + if (propagate) + new_tag = mnt->mnt_tag; + + vxdprintk(VXD_CBIT(tag, 7), + "dx_propagate_tag(%p[#%lu.%d]): %d,%d", + inode, inode->i_ino, inode->i_tag, + new_tag, (propagate) ? 1 : 0); + + if (propagate) + inode->i_tag = new_tag; +} + +#include + +EXPORT_SYMBOL_GPL(__dx_propagate_tag); + +#endif /* CONFIG_PROPAGATE */ + diff -Nur kernel-2.6.32.54/linux-2.6.32/kernel/vserver/Kconfig kernel-2.6.32.54.vs/linux-2.6.32/kernel/vserver/Kconfig --- kernel-2.6.32.54/linux-2.6.32/kernel/vserver/Kconfig 1970-01-01 01:00:00.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/kernel/vserver/Kconfig 2012-01-16 14:51:22.033408295 +0100 @@ -0,0 +1,251 @@ +# +# Linux VServer configuration +# + +menu "Linux VServer" + +config VSERVER_AUTO_LBACK + bool "Automatically Assign Loopback IP" + default y + help + Automatically assign a guest-specific loopback + IP and add it to the kernel network stack on + startup. + +config VSERVER_AUTO_SINGLE + bool "Automatic Single IP Special Casing" + depends on EXPERIMENTAL + default y + help + This allows network contexts with a single IP to + automatically remap 0.0.0.0 bindings to that IP, + avoiding further network checks and improving + performance. + + (note: such guests do not allow changing the IP + on the fly and do not show loopback addresses) + +config VSERVER_COWBL + bool "Enable COW Immutable Link Breaking" + default y + help + This enables the COW (Copy-On-Write) link break code. + It allows you to treat unified files like normal files + when writing to them (which will implicitly break the + link and create a copy of the unified file) + +config VSERVER_VTIME + bool "Enable Virtualized Guest Time" + depends on EXPERIMENTAL + default n + help + This enables per-guest time offsets to allow for + adjusting the system clock individually per guest. + This adds some overhead to the time functions and + therefore should not be enabled without good reason. + +config VSERVER_DEVICE + bool "Enable Guest Device Mapping" + depends on EXPERIMENTAL + default n + help + This enables generic device remapping.
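(Editorial aside, not part of the patch: a minimal sketch of how a caller might consume the mount options handled by dx_parse_tag() above. The option string and local variable names are invented; note that the helper rewrites the string in place whenever it recognizes an option, so a scratch copy is assumed.)

	/* hypothetical caller, assuming CONFIG_PROPAGATE is enabled */
	char data[] = "tagid=42,notagcheck";
	tag_t tag = 0;
	int mnt_flags = 0;
	unsigned long ms_flags = 0;

	/* the return value is the set of recognized option bits; afterwards
	 * tag == 42, mnt_flags contains MNT_TAGID and ms_flags MS_NOTAGCHECK */
	if (dx_parse_tag(data, &tag, 0 /* don't strip options */, &mnt_flags, &ms_flags))
		/* data has been rewritten in place, don't reuse it */ ;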
+ +config VSERVER_PROC_SECURE + bool "Enable Proc Security" + depends on PROC_FS + default y + help + This configures ProcFS security to initially hide + non-process entries for all contexts except the main and + spectator context (i.e. for all guests), which is a secure + default. + + (note: on 1.2x the entries were visible by default) + +config VSERVER_HARDCPU + bool "Enable Hard CPU Limits" + default y + help + Activate the Hard CPU Limits. + + This will compile in code that allows the Token Bucket + Scheduler to put processes on hold when a context's + tokens are depleted (provided that its per-context + sched_hard flag is set). + + Processes belonging to that context will not be able + to consume CPU resources again until a per-context + configured minimum of tokens has been reached. + +config VSERVER_IDLETIME + bool "Avoid idle CPUs by skipping Time" + depends on VSERVER_HARDCPU + default y + help + This option allows the scheduler to artificially + advance time (per cpu) when otherwise the idle + task would be scheduled, thus keeping the cpu + busy and sharing the available resources among + certain contexts. + +config VSERVER_IDLELIMIT + bool "Limit the IDLE task" + depends on VSERVER_HARDCPU + default n + help + Limit the idle slices, so that the next context + will be scheduled as soon as possible. + + This might improve interactivity and latency, but + will also marginally increase scheduling overhead. + +choice + prompt "Persistent Inode Tagging" + default TAGGING_ID24 + help + This adds persistent context information to filesystems + mounted with the tagxid option. Tagging is a requirement + for per-context disk limits and per-context quota. + + +config TAGGING_NONE + bool "Disabled" + help + do not store per-context information in inodes. + +config TAGGING_UID16 + bool "UID16/GID32" + help + reduces UID to 16 bit, but leaves GID at 32 bit. + +config TAGGING_GID16 + bool "UID32/GID16" + help + reduces GID to 16 bit, but leaves UID at 32 bit. + +config TAGGING_ID24 + bool "UID24/GID24" + help + uses the upper 8bit from UID and GID for XID tagging + which leaves 24bit for UID/GID each, which should be + more than sufficient for normal use. + +config TAGGING_INTERN + bool "UID32/GID32" + help + this uses otherwise reserved inode fields in the on + disk representation, which limits the use to a few + filesystems (currently ext2 and ext3) + +endchoice + +config TAG_NFSD + bool "Tag NFSD User Auth and Files" + default n + help + Enable this if you want the in-kernel NFS + Server to use the tagging specified above. + (will require patched clients too) + +config VSERVER_PRIVACY + bool "Honor Privacy Aspects of Guests" + default n + help + When enabled, most context checks will disallow + access to structures assigned to a specific context, + like ptys or loop devices. + +config VSERVER_CONTEXTS + int "Maximum number of Contexts (1-65533)" if EMBEDDED + range 1 65533 + default "768" if 64BIT + default "256" + help + This setting will optimize certain data structures + and memory allocations according to the expected + maximum. + + note: this is not a strict upper limit. + +config VSERVER_WARN + bool "VServer Warnings" + default y + help + This enables various runtime warnings, which will + notify about potential manipulation attempts or + resource shortage. It is generally considered to + be a good idea to have that enabled. + +config VSERVER_DEBUG + bool "VServer Debugging Code" + default n + help + Set this to yes if you want to be able to activate + debugging output at runtime.
It adds a very small + overhead to all vserver related functions and + increases the kernel size by about 20k. + +config VSERVER_HISTORY + bool "VServer History Tracing" + depends on VSERVER_DEBUG + default n + help + Set this to yes if you want to record the history of + linux-vserver activities, so they can be replayed in + the event of a kernel panic or oops. + +config VSERVER_HISTORY_SIZE + int "Per-CPU History Size (32-65536)" + depends on VSERVER_HISTORY + range 32 65536 + default 64 + help + This allows you to specify the number of entries in + the per-CPU history buffer. + +config VSERVER_MONITOR + bool "VServer Scheduling Monitor" + depends on VSERVER_DISABLED + default n + help + Set this to yes if you want to record the scheduling + decisions, so that they can be relayed to userspace + for detailed analysis. + +config VSERVER_MONITOR_SIZE + int "Per-CPU Monitor Queue Size (32-65536)" + depends on VSERVER_MONITOR + range 32 65536 + default 1024 + help + This allows you to specify the number of entries in + the per-CPU scheduling monitor buffer. + +config VSERVER_MONITOR_SYNC + int "Per-CPU Monitor Sync Interval (0-65536)" + depends on VSERVER_MONITOR + range 0 65536 + default 256 + help + This allows you to specify the interval in ticks + when a time sync entry is inserted. + +endmenu + + +config VSERVER + bool + default y + select NAMESPACES + select UTS_NS + select IPC_NS + select USER_NS + select SYSVIPC + +config VSERVER_SECURITY + bool + depends on SECURITY + default y + select SECURITY_CAPABILITIES + diff -Nur kernel-2.6.32.54/linux-2.6.32/kernel/vserver/limit.c kernel-2.6.32.54.vs/linux-2.6.32/kernel/vserver/limit.c --- kernel-2.6.32.54/linux-2.6.32/kernel/vserver/limit.c 1970-01-01 01:00:00.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/kernel/vserver/limit.c 2012-01-16 14:51:22.045408253 +0100 @@ -0,0 +1,392 @@ +/* + * linux/kernel/vserver/limit.c + * + * Virtual Server: Context Limits + * + * Copyright (C) 2004-2010 Herbert Pötzl + * + * V0.01 broken out from vcontext V0.05 + * V0.02 changed vcmds to vxi arg + * V0.03 added memory cgroup support + * + */ + +#include +#include +#include +#include +#include +#include +#include + +#include + + +const char *vlimit_name[NUM_LIMITS] = { + [RLIMIT_CPU] = "CPU", + [RLIMIT_RSS] = "RSS", + [RLIMIT_NPROC] = "NPROC", + [RLIMIT_NOFILE] = "NOFILE", + [RLIMIT_MEMLOCK] = "VML", + [RLIMIT_AS] = "VM", + [RLIMIT_LOCKS] = "LOCKS", + [RLIMIT_SIGPENDING] = "SIGP", + [RLIMIT_MSGQUEUE] = "MSGQ", + + [VLIMIT_NSOCK] = "NSOCK", + [VLIMIT_OPENFD] = "OPENFD", + [VLIMIT_ANON] = "ANON", + [VLIMIT_SHMEM] = "SHMEM", + [VLIMIT_DENTRY] = "DENTRY", +}; + +EXPORT_SYMBOL_GPL(vlimit_name); + +#define MASK_ENTRY(x) (1 << (x)) + +const struct vcmd_ctx_rlimit_mask_v0 vlimit_mask = { + /* minimum */ + 0 + , /* softlimit */ + MASK_ENTRY( RLIMIT_RSS ) | + MASK_ENTRY( VLIMIT_ANON ) | + 0 + , /* maximum */ + MASK_ENTRY( RLIMIT_RSS ) | + MASK_ENTRY( RLIMIT_NPROC ) | + MASK_ENTRY( RLIMIT_NOFILE ) | + MASK_ENTRY( RLIMIT_MEMLOCK ) | + MASK_ENTRY( RLIMIT_AS ) | + MASK_ENTRY( RLIMIT_LOCKS ) | + MASK_ENTRY( RLIMIT_MSGQUEUE ) | + + MASK_ENTRY( VLIMIT_NSOCK ) | + MASK_ENTRY( VLIMIT_OPENFD ) | + MASK_ENTRY( VLIMIT_ANON ) | + MASK_ENTRY( VLIMIT_SHMEM ) | + MASK_ENTRY( VLIMIT_DENTRY ) | + 0 +}; + /* accounting only */ +uint32_t account_mask = + MASK_ENTRY( VLIMIT_SEMARY ) | + MASK_ENTRY( VLIMIT_NSEMS ) | + MASK_ENTRY( VLIMIT_MAPPED ) | + 0; + + +static int is_valid_vlimit(int id) +{ + uint32_t mask = vlimit_mask.minimum | + vlimit_mask.softlimit | vlimit_mask.maximum; + 
return mask & (1 << id); +} + +static int is_accounted_vlimit(int id) +{ + if (is_valid_vlimit(id)) + return 1; + return account_mask & (1 << id); +} + + +static inline uint64_t vc_get_soft(struct vx_info *vxi, int id) +{ + rlim_t limit = __rlim_soft(&vxi->limit, id); + return VX_VLIM(limit); +} + +static inline uint64_t vc_get_hard(struct vx_info *vxi, int id) +{ + rlim_t limit = __rlim_hard(&vxi->limit, id); + return VX_VLIM(limit); +} + +static int do_get_rlimit(struct vx_info *vxi, uint32_t id, + uint64_t *minimum, uint64_t *softlimit, uint64_t *maximum) +{ + if (!is_valid_vlimit(id)) + return -EINVAL; + + if (minimum) + *minimum = CRLIM_UNSET; + if (softlimit) + *softlimit = vc_get_soft(vxi, id); + if (maximum) + *maximum = vc_get_hard(vxi, id); + return 0; +} + +int vc_get_rlimit(struct vx_info *vxi, void __user *data) +{ + struct vcmd_ctx_rlimit_v0 vc_data; + int ret; + + if (copy_from_user(&vc_data, data, sizeof(vc_data))) + return -EFAULT; + + ret = do_get_rlimit(vxi, vc_data.id, + &vc_data.minimum, &vc_data.softlimit, &vc_data.maximum); + if (ret) + return ret; + + if (copy_to_user(data, &vc_data, sizeof(vc_data))) + return -EFAULT; + return 0; +} + +static int do_set_rlimit(struct vx_info *vxi, uint32_t id, + uint64_t minimum, uint64_t softlimit, uint64_t maximum) +{ + if (!is_valid_vlimit(id)) + return -EINVAL; + + if (maximum != CRLIM_KEEP) + __rlim_hard(&vxi->limit, id) = VX_RLIM(maximum); + if (softlimit != CRLIM_KEEP) + __rlim_soft(&vxi->limit, id) = VX_RLIM(softlimit); + + /* clamp soft limit */ + if (__rlim_soft(&vxi->limit, id) > __rlim_hard(&vxi->limit, id)) + __rlim_soft(&vxi->limit, id) = __rlim_hard(&vxi->limit, id); + + return 0; +} + +int vc_set_rlimit(struct vx_info *vxi, void __user *data) +{ + struct vcmd_ctx_rlimit_v0 vc_data; + + if (copy_from_user(&vc_data, data, sizeof(vc_data))) + return -EFAULT; + + return do_set_rlimit(vxi, vc_data.id, + vc_data.minimum, vc_data.softlimit, vc_data.maximum); +} + +#ifdef CONFIG_IA32_EMULATION + +int vc_set_rlimit_x32(struct vx_info *vxi, void __user *data) +{ + struct vcmd_ctx_rlimit_v0_x32 vc_data; + + if (copy_from_user(&vc_data, data, sizeof(vc_data))) + return -EFAULT; + + return do_set_rlimit(vxi, vc_data.id, + vc_data.minimum, vc_data.softlimit, vc_data.maximum); +} + +int vc_get_rlimit_x32(struct vx_info *vxi, void __user *data) +{ + struct vcmd_ctx_rlimit_v0_x32 vc_data; + int ret; + + if (copy_from_user(&vc_data, data, sizeof(vc_data))) + return -EFAULT; + + ret = do_get_rlimit(vxi, vc_data.id, + &vc_data.minimum, &vc_data.softlimit, &vc_data.maximum); + if (ret) + return ret; + + if (copy_to_user(data, &vc_data, sizeof(vc_data))) + return -EFAULT; + return 0; +} + +#endif /* CONFIG_IA32_EMULATION */ + + +int vc_get_rlimit_mask(uint32_t id, void __user *data) +{ + if (copy_to_user(data, &vlimit_mask, sizeof(vlimit_mask))) + return -EFAULT; + return 0; +} + + +static inline void vx_reset_hits(struct _vx_limit *limit) +{ + int lim; + + for (lim = 0; lim < NUM_LIMITS; lim++) { + atomic_set(&__rlim_lhit(limit, lim), 0); + } +} + +int vc_reset_hits(struct vx_info *vxi, void __user *data) +{ + vx_reset_hits(&vxi->limit); + return 0; +} + +static inline void vx_reset_minmax(struct _vx_limit *limit) +{ + rlim_t value; + int lim; + + for (lim = 0; lim < NUM_LIMITS; lim++) { + value = __rlim_get(limit, lim); + __rlim_rmax(limit, lim) = value; + __rlim_rmin(limit, lim) = value; + } +} + +int vc_reset_minmax(struct vx_info *vxi, void __user *data) +{ + vx_reset_minmax(&vxi->limit); + return 0; +} + + +int 
vc_rlimit_stat(struct vx_info *vxi, void __user *data) +{ + struct vcmd_rlimit_stat_v0 vc_data; + struct _vx_limit *limit = &vxi->limit; + int id; + + if (copy_from_user(&vc_data, data, sizeof(vc_data))) + return -EFAULT; + + id = vc_data.id; + if (!is_accounted_vlimit(id)) + return -EINVAL; + + vx_limit_fixup(limit, id); + vc_data.hits = atomic_read(&__rlim_lhit(limit, id)); + vc_data.value = __rlim_get(limit, id); + vc_data.minimum = __rlim_rmin(limit, id); + vc_data.maximum = __rlim_rmax(limit, id); + + if (copy_to_user(data, &vc_data, sizeof(vc_data))) + return -EFAULT; + return 0; +} + + +void vx_vsi_meminfo(struct sysinfo *val) +{ +#ifdef CONFIG_CGROUP_MEM_RES_CTLR + struct mem_cgroup *mcg = mem_cgroup_from_task(current); + u64 res_limit, res_usage; + + if (!mcg) + return; + + res_limit = mem_cgroup_res_read_u64(mcg, RES_LIMIT); + res_usage = mem_cgroup_res_read_u64(mcg, RES_USAGE); + + if (res_limit != RESOURCE_MAX) + val->totalram = (res_limit >> PAGE_SHIFT); + val->freeram = val->totalram - (res_usage >> PAGE_SHIFT); + val->bufferram = 0; +#else /* !CONFIG_CGROUP_MEM_RES_CTLR */ + struct vx_info *vxi = current_vx_info(); + unsigned long totalram, freeram; + rlim_t v; + + /* we blindly accept the max */ + v = __rlim_soft(&vxi->limit, RLIMIT_RSS); + totalram = (v != RLIM_INFINITY) ? v : val->totalram; + + /* total minus used equals free */ + v = __vx_cres_array_fixup(&vxi->limit, VLA_RSS); + freeram = (v < totalram) ? totalram - v : 0; + + val->totalram = totalram; + val->freeram = freeram; +#endif /* CONFIG_CGROUP_MEM_RES_CTLR */ + val->totalhigh = 0; + val->freehigh = 0; + return; +} + +void vx_vsi_swapinfo(struct sysinfo *val) +{ +#ifdef CONFIG_CGROUP_MEM_RES_CTLR +#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP + struct mem_cgroup *mcg = mem_cgroup_from_task(current); + u64 res_limit, res_usage, memsw_limit, memsw_usage; + s64 swap_limit, swap_usage; + + if (!mcg) + return; + + res_limit = mem_cgroup_res_read_u64(mcg, RES_LIMIT); + res_usage = mem_cgroup_res_read_u64(mcg, RES_USAGE); + memsw_limit = mem_cgroup_memsw_read_u64(mcg, RES_LIMIT); + memsw_usage = mem_cgroup_memsw_read_u64(mcg, RES_USAGE); + + if (res_limit == RESOURCE_MAX) + return; + + swap_limit = memsw_limit - res_limit; + if (memsw_limit != RESOURCE_MAX) + val->totalswap = swap_limit >> PAGE_SHIFT; + + swap_usage = memsw_usage - res_usage; + val->freeswap = (swap_usage < swap_limit) ? + val->totalswap - (swap_usage >> PAGE_SHIFT) : 0; +#else /* !CONFIG_CGROUP_MEM_RES_CTLR_SWAP */ + val->totalswap = 0; + val->freeswap = 0; +#endif /* !CONFIG_CGROUP_MEM_RES_CTLR_SWAP */ +#else /* !CONFIG_CGROUP_MEM_RES_CTLR */ + struct vx_info *vxi = current_vx_info(); + unsigned long totalswap, freeswap; + rlim_t v, w; + + v = __rlim_soft(&vxi->limit, RLIMIT_RSS); + if (v == RLIM_INFINITY) { + val->freeswap = val->totalswap; + return; + } + + /* we blindly accept the max */ + w = __rlim_hard(&vxi->limit, RLIMIT_RSS); + totalswap = (w != RLIM_INFINITY) ? (w - v) : val->totalswap; + + /* currently 'used' swap */ + w = __vx_cres_array_fixup(&vxi->limit, VLA_RSS); + w -= (w > v) ? v : w; + + /* total minus used equals free */ + freeswap = (w < totalswap) ? 
totalswap - w : 0; + + val->totalswap = totalswap; + val->freeswap = freeswap; +#endif /* CONFIG_CGROUP_MEM_RES_CTLR */ + return; +} + +long vx_vsi_cached(struct sysinfo *val) +{ +#ifdef CONFIG_CGROUP_MEM_RES_CTLR + struct mem_cgroup *mcg = mem_cgroup_from_task(current); + + return mem_cgroup_stat_read_cache(mcg); +#else + return 0; +#endif +} + + +unsigned long vx_badness(struct task_struct *task, struct mm_struct *mm) +{ + struct vx_info *vxi = mm->mm_vx_info; + unsigned long points; + rlim_t v, w; + + if (!vxi) + return 0; + + points = vxi->vx_badness_bias; + + v = __vx_cres_array_fixup(&vxi->limit, VLA_RSS); + w = __rlim_soft(&vxi->limit, RLIMIT_RSS); + points += (v > w) ? (v - w) : 0; + + return points; +} + diff -Nur kernel-2.6.32.54/linux-2.6.32/kernel/vserver/limit_init.h kernel-2.6.32.54.vs/linux-2.6.32/kernel/vserver/limit_init.h --- kernel-2.6.32.54/linux-2.6.32/kernel/vserver/limit_init.h 1970-01-01 01:00:00.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/kernel/vserver/limit_init.h 2012-01-16 14:51:22.045408253 +0100 @@ -0,0 +1,31 @@ + + +static inline void vx_info_init_limit(struct _vx_limit *limit) +{ + int lim; + + for (lim = 0; lim < NUM_LIMITS; lim++) { + __rlim_soft(limit, lim) = RLIM_INFINITY; + __rlim_hard(limit, lim) = RLIM_INFINITY; + __rlim_set(limit, lim, 0); + atomic_set(&__rlim_lhit(limit, lim), 0); + __rlim_rmin(limit, lim) = 0; + __rlim_rmax(limit, lim) = 0; + } +} + +static inline void vx_info_exit_limit(struct _vx_limit *limit) +{ + rlim_t value; + int lim; + + for (lim = 0; lim < NUM_LIMITS; lim++) { + if ((1 << lim) & VLIM_NOCHECK) + continue; + value = __rlim_get(limit, lim); + vxwprintk_xid(value, + "!!! limit: %p[%s,%d] = %ld on exit.", + limit, vlimit_name[lim], lim, (long)value); + } +} + diff -Nur kernel-2.6.32.54/linux-2.6.32/kernel/vserver/limit_proc.h kernel-2.6.32.54.vs/linux-2.6.32/kernel/vserver/limit_proc.h --- kernel-2.6.32.54/linux-2.6.32/kernel/vserver/limit_proc.h 1970-01-01 01:00:00.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/kernel/vserver/limit_proc.h 2012-01-16 14:51:22.049408239 +0100 @@ -0,0 +1,57 @@ +#ifndef _VX_LIMIT_PROC_H +#define _VX_LIMIT_PROC_H + +#include + + +#define VX_LIMIT_FMT ":\t%8ld\t%8ld/%8ld\t%8lld/%8lld\t%6d\n" +#define VX_LIMIT_TOP \ + "Limit\t current\t min/max\t\t soft/hard\t\thits\n" + +#define VX_LIMIT_ARG(r) \ + (unsigned long)__rlim_get(limit, r), \ + (unsigned long)__rlim_rmin(limit, r), \ + (unsigned long)__rlim_rmax(limit, r), \ + VX_VLIM(__rlim_soft(limit, r)), \ + VX_VLIM(__rlim_hard(limit, r)), \ + atomic_read(&__rlim_lhit(limit, r)) + +static inline int vx_info_proc_limit(struct _vx_limit *limit, char *buffer) +{ + vx_limit_fixup(limit, -1); + return sprintf(buffer, VX_LIMIT_TOP + "PROC" VX_LIMIT_FMT + "VM" VX_LIMIT_FMT + "VML" VX_LIMIT_FMT + "RSS" VX_LIMIT_FMT + "ANON" VX_LIMIT_FMT + "RMAP" VX_LIMIT_FMT + "FILES" VX_LIMIT_FMT + "OFD" VX_LIMIT_FMT + "LOCKS" VX_LIMIT_FMT + "SOCK" VX_LIMIT_FMT + "MSGQ" VX_LIMIT_FMT + "SHM" VX_LIMIT_FMT + "SEMA" VX_LIMIT_FMT + "SEMS" VX_LIMIT_FMT + "DENT" VX_LIMIT_FMT, + VX_LIMIT_ARG(RLIMIT_NPROC), + VX_LIMIT_ARG(RLIMIT_AS), + VX_LIMIT_ARG(RLIMIT_MEMLOCK), + VX_LIMIT_ARG(RLIMIT_RSS), + VX_LIMIT_ARG(VLIMIT_ANON), + VX_LIMIT_ARG(VLIMIT_MAPPED), + VX_LIMIT_ARG(RLIMIT_NOFILE), + VX_LIMIT_ARG(VLIMIT_OPENFD), + VX_LIMIT_ARG(RLIMIT_LOCKS), + VX_LIMIT_ARG(VLIMIT_NSOCK), + VX_LIMIT_ARG(RLIMIT_MSGQUEUE), + VX_LIMIT_ARG(VLIMIT_SHMEM), + VX_LIMIT_ARG(VLIMIT_SEMARY), + VX_LIMIT_ARG(VLIMIT_NSEMS), + VX_LIMIT_ARG(VLIMIT_DENTRY)); +} + +#endif /* _VX_LIMIT_PROC_H */ + + diff 
-Nur kernel-2.6.32.54/linux-2.6.32/kernel/vserver/Makefile kernel-2.6.32.54.vs/linux-2.6.32/kernel/vserver/Makefile --- kernel-2.6.32.54/linux-2.6.32/kernel/vserver/Makefile 1970-01-01 01:00:00.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/kernel/vserver/Makefile 2012-01-16 14:51:22.033408295 +0100 @@ -0,0 +1,18 @@ +# +# Makefile for the Linux vserver routines. +# + + +obj-y += vserver.o + +vserver-y := switch.o context.o space.o sched.o network.o inode.o \ + limit.o cvirt.o cacct.o signal.o helper.o init.o \ + dlimit.o tag.o + +vserver-$(CONFIG_INET) += inet.o +vserver-$(CONFIG_PROC_FS) += proc.o +vserver-$(CONFIG_VSERVER_DEBUG) += sysctl.o debug.o +vserver-$(CONFIG_VSERVER_HISTORY) += history.o +vserver-$(CONFIG_VSERVER_MONITOR) += monitor.o +vserver-$(CONFIG_VSERVER_DEVICE) += device.o + diff -Nur kernel-2.6.32.54/linux-2.6.32/kernel/vserver/monitor.c kernel-2.6.32.54.vs/linux-2.6.32/kernel/vserver/monitor.c --- kernel-2.6.32.54/linux-2.6.32/kernel/vserver/monitor.c 1970-01-01 01:00:00.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/kernel/vserver/monitor.c 2012-01-16 14:51:22.049408239 +0100 @@ -0,0 +1,138 @@ +/* + * kernel/vserver/monitor.c + * + * Virtual Context Scheduler Monitor + * + * Copyright (C) 2006-2007 Herbert Pötzl + * + * V0.01 basic design + * + */ + +#include +#include +#include +#include + +#include +#include + + +#ifdef CONFIG_VSERVER_MONITOR +#define VXM_SIZE CONFIG_VSERVER_MONITOR_SIZE +#else +#define VXM_SIZE 64 +#endif + +struct _vx_monitor { + unsigned int counter; + + struct _vx_mon_entry entry[VXM_SIZE+1]; +}; + + +DEFINE_PER_CPU(struct _vx_monitor, vx_monitor_buffer); + +unsigned volatile int vxm_active = 1; + +static atomic_t sequence = ATOMIC_INIT(0); + + +/* vxm_advance() + + * requires disabled preemption */ + +struct _vx_mon_entry *vxm_advance(int cpu) +{ + struct _vx_monitor *mon = &per_cpu(vx_monitor_buffer, cpu); + struct _vx_mon_entry *entry; + unsigned int index; + + index = vxm_active ? (mon->counter++ % VXM_SIZE) : VXM_SIZE; + entry = &mon->entry[index]; + + entry->ev.seq = atomic_inc_return(&sequence); + entry->ev.jif = jiffies; + return entry; +} + +EXPORT_SYMBOL_GPL(vxm_advance); + + +int do_read_monitor(struct __user _vx_mon_entry *data, + int cpu, uint32_t *index, uint32_t *count) +{ + int pos, ret = 0; + struct _vx_monitor *mon = &per_cpu(vx_monitor_buffer, cpu); + int end = mon->counter; + int start = end - VXM_SIZE + 2; + int idx = *index; + + /* special case: get current pos */ + if (!*count) { + *index = end; + return 0; + } + + /* have we lost some data? */ + if (idx < start) + idx = start; + + for (pos = 0; (pos < *count) && (idx < end); pos++, idx++) { + struct _vx_mon_entry *entry = + &mon->entry[idx % VXM_SIZE]; + + /* send entry to userspace */ + ret = copy_to_user(&data[pos], entry, sizeof(*entry)); + if (ret) + break; + } + /* save new index and count */ + *index = idx; + *count = pos; + return ret ? 
ret : (*index < end); +} + +int vc_read_monitor(uint32_t id, void __user *data) +{ + struct vcmd_read_monitor_v0 vc_data; + int ret; + + if (id >= NR_CPUS) + return -EINVAL; + + if (copy_from_user(&vc_data, data, sizeof(vc_data))) + return -EFAULT; + + ret = do_read_monitor((struct __user _vx_mon_entry *)vc_data.data, + id, &vc_data.index, &vc_data.count); + + if (copy_to_user(data, &vc_data, sizeof(vc_data))) + return -EFAULT; + return ret; +} + +#ifdef CONFIG_COMPAT + +int vc_read_monitor_x32(uint32_t id, void __user *data) +{ + struct vcmd_read_monitor_v0_x32 vc_data; + int ret; + + if (id >= NR_CPUS) + return -EINVAL; + + if (copy_from_user(&vc_data, data, sizeof(vc_data))) + return -EFAULT; + + ret = do_read_monitor((struct __user _vx_mon_entry *) + compat_ptr(vc_data.data_ptr), + id, &vc_data.index, &vc_data.count); + + if (copy_to_user(data, &vc_data, sizeof(vc_data))) + return -EFAULT; + return ret; +} + +#endif /* CONFIG_COMPAT */ + diff -Nur kernel-2.6.32.54/linux-2.6.32/kernel/vserver/network.c kernel-2.6.32.54.vs/linux-2.6.32/kernel/vserver/network.c --- kernel-2.6.32.54/linux-2.6.32/kernel/vserver/network.c 1970-01-01 01:00:00.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/kernel/vserver/network.c 2012-01-16 14:51:22.053408225 +0100 @@ -0,0 +1,864 @@ +/* + * linux/kernel/vserver/network.c + * + * Virtual Server: Network Support + * + * Copyright (C) 2003-2007 Herbert Pötzl + * + * V0.01 broken out from vcontext V0.05 + * V0.02 cleaned up implementation + * V0.03 added equiv nx commands + * V0.04 switch to RCU based hash + * V0.05 and back to locking again + * V0.06 changed vcmds to nxi arg + * V0.07 have __create claim() the nxi + * + */ + +#include +#include +#include + +#include +#include +#include + + +atomic_t nx_global_ctotal = ATOMIC_INIT(0); +atomic_t nx_global_cactive = ATOMIC_INIT(0); + +static struct kmem_cache *nx_addr_v4_cachep = NULL; +static struct kmem_cache *nx_addr_v6_cachep = NULL; + + +static int __init init_network(void) +{ + nx_addr_v4_cachep = kmem_cache_create("nx_v4_addr_cache", + sizeof(struct nx_addr_v4), 0, + SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL); + nx_addr_v6_cachep = kmem_cache_create("nx_v6_addr_cache", + sizeof(struct nx_addr_v6), 0, + SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL); + return 0; +} + + +/* __alloc_nx_addr_v4() */ + +static inline struct nx_addr_v4 *__alloc_nx_addr_v4(void) +{ + struct nx_addr_v4 *nxa = kmem_cache_alloc( + nx_addr_v4_cachep, GFP_KERNEL); + + if (!IS_ERR(nxa)) + memset(nxa, 0, sizeof(*nxa)); + return nxa; +} + +/* __dealloc_nx_addr_v4() */ + +static inline void __dealloc_nx_addr_v4(struct nx_addr_v4 *nxa) +{ + kmem_cache_free(nx_addr_v4_cachep, nxa); +} + +/* __dealloc_nx_addr_v4_all() */ + +static inline void __dealloc_nx_addr_v4_all(struct nx_addr_v4 *nxa) +{ + while (nxa) { + struct nx_addr_v4 *next = nxa->next; + + __dealloc_nx_addr_v4(nxa); + nxa = next; + } +} + + +#ifdef CONFIG_IPV6 + +/* __alloc_nx_addr_v6() */ + +static inline struct nx_addr_v6 *__alloc_nx_addr_v6(void) +{ + struct nx_addr_v6 *nxa = kmem_cache_alloc( + nx_addr_v6_cachep, GFP_KERNEL); + + if (!IS_ERR(nxa)) + memset(nxa, 0, sizeof(*nxa)); + return nxa; +} + +/* __dealloc_nx_addr_v6() */ + +static inline void __dealloc_nx_addr_v6(struct nx_addr_v6 *nxa) +{ + kmem_cache_free(nx_addr_v6_cachep, nxa); +} + +/* __dealloc_nx_addr_v6_all() */ + +static inline void __dealloc_nx_addr_v6_all(struct nx_addr_v6 *nxa) +{ + while (nxa) { + struct nx_addr_v6 *next = nxa->next; + + __dealloc_nx_addr_v6(nxa); + nxa = next; + } +} + +#endif /* CONFIG_IPV6 */ + +/* 
__alloc_nx_info() + + * allocate an initialized nx_info struct + * doesn't make it visible (hash) */ + +static struct nx_info *__alloc_nx_info(nid_t nid) +{ + struct nx_info *new = NULL; + + vxdprintk(VXD_CBIT(nid, 1), "alloc_nx_info(%d)*", nid); + + /* would this benefit from a slab cache? */ + new = kmalloc(sizeof(struct nx_info), GFP_KERNEL); + if (!new) + return 0; + + memset(new, 0, sizeof(struct nx_info)); + new->nx_id = nid; + INIT_HLIST_NODE(&new->nx_hlist); + atomic_set(&new->nx_usecnt, 0); + atomic_set(&new->nx_tasks, 0); + new->nx_state = 0; + + new->nx_flags = NXF_INIT_SET; + + /* rest of init goes here */ + + new->v4_lback.s_addr = htonl(INADDR_LOOPBACK); + new->v4_bcast.s_addr = htonl(INADDR_BROADCAST); + + vxdprintk(VXD_CBIT(nid, 0), + "alloc_nx_info(%d) = %p", nid, new); + atomic_inc(&nx_global_ctotal); + return new; +} + +/* __dealloc_nx_info() + + * final disposal of nx_info */ + +static void __dealloc_nx_info(struct nx_info *nxi) +{ + vxdprintk(VXD_CBIT(nid, 0), + "dealloc_nx_info(%p)", nxi); + + nxi->nx_hlist.next = LIST_POISON1; + nxi->nx_id = -1; + + BUG_ON(atomic_read(&nxi->nx_usecnt)); + BUG_ON(atomic_read(&nxi->nx_tasks)); + + __dealloc_nx_addr_v4_all(nxi->v4.next); + + nxi->nx_state |= NXS_RELEASED; + kfree(nxi); + atomic_dec(&nx_global_ctotal); +} + +static void __shutdown_nx_info(struct nx_info *nxi) +{ + nxi->nx_state |= NXS_SHUTDOWN; + vs_net_change(nxi, VSC_NETDOWN); +} + +/* exported stuff */ + +void free_nx_info(struct nx_info *nxi) +{ + /* context shutdown is mandatory */ + BUG_ON(nxi->nx_state != NXS_SHUTDOWN); + + /* context must not be hashed */ + BUG_ON(nxi->nx_state & NXS_HASHED); + + BUG_ON(atomic_read(&nxi->nx_usecnt)); + BUG_ON(atomic_read(&nxi->nx_tasks)); + + __dealloc_nx_info(nxi); +} + + +void __nx_set_lback(struct nx_info *nxi) +{ + int nid = nxi->nx_id; + __be32 lback = htonl(INADDR_LOOPBACK ^ ((nid & 0xFFFF) << 8)); + + nxi->v4_lback.s_addr = lback; +} + +extern int __nx_inet_add_lback(__be32 addr); +extern int __nx_inet_del_lback(__be32 addr); + + +/* hash table for nx_info hash */ + +#define NX_HASH_SIZE 13 + +struct hlist_head nx_info_hash[NX_HASH_SIZE]; + +static spinlock_t nx_info_hash_lock = SPIN_LOCK_UNLOCKED; + + +static inline unsigned int __hashval(nid_t nid) +{ + return (nid % NX_HASH_SIZE); +} + + + +/* __hash_nx_info() + + * add the nxi to the global hash table + * requires the hash_lock to be held */ + +static inline void __hash_nx_info(struct nx_info *nxi) +{ + struct hlist_head *head; + + vxd_assert_lock(&nx_info_hash_lock); + vxdprintk(VXD_CBIT(nid, 4), + "__hash_nx_info: %p[#%d]", nxi, nxi->nx_id); + + /* context must not be hashed */ + BUG_ON(nx_info_state(nxi, NXS_HASHED)); + + nxi->nx_state |= NXS_HASHED; + head = &nx_info_hash[__hashval(nxi->nx_id)]; + hlist_add_head(&nxi->nx_hlist, head); + atomic_inc(&nx_global_cactive); +} + +/* __unhash_nx_info() + + * remove the nxi from the global hash table + * requires the hash_lock to be held */ + +static inline void __unhash_nx_info(struct nx_info *nxi) +{ + vxd_assert_lock(&nx_info_hash_lock); + vxdprintk(VXD_CBIT(nid, 4), + "__unhash_nx_info: %p[#%d.%d.%d]", nxi, nxi->nx_id, + atomic_read(&nxi->nx_usecnt), atomic_read(&nxi->nx_tasks)); + + /* context must be hashed */ + BUG_ON(!nx_info_state(nxi, NXS_HASHED)); + /* but without tasks */ + BUG_ON(atomic_read(&nxi->nx_tasks)); + + nxi->nx_state &= ~NXS_HASHED; + hlist_del(&nxi->nx_hlist); + atomic_dec(&nx_global_cactive); +} + + +/* __lookup_nx_info() + + * requires the hash_lock to be held + * doesn't increment the nx_refcnt 
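+ * (callers that need a reference wrap the result in get_nx_info(), as lookup_nx_info() below does)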
*/ + +static inline struct nx_info *__lookup_nx_info(nid_t nid) +{ + struct hlist_head *head = &nx_info_hash[__hashval(nid)]; + struct hlist_node *pos; + struct nx_info *nxi; + + vxd_assert_lock(&nx_info_hash_lock); + hlist_for_each(pos, head) { + nxi = hlist_entry(pos, struct nx_info, nx_hlist); + + if (nxi->nx_id == nid) + goto found; + } + nxi = NULL; +found: + vxdprintk(VXD_CBIT(nid, 0), + "__lookup_nx_info(#%u): %p[#%u]", + nid, nxi, nxi ? nxi->nx_id : 0); + return nxi; +} + + +/* __create_nx_info() + + * create the requested context + * get(), claim() and hash it */ + +static struct nx_info *__create_nx_info(int id) +{ + struct nx_info *new, *nxi = NULL; + + vxdprintk(VXD_CBIT(nid, 1), "create_nx_info(%d)*", id); + + if (!(new = __alloc_nx_info(id))) + return ERR_PTR(-ENOMEM); + + /* required to make dynamic xids unique */ + spin_lock(&nx_info_hash_lock); + + /* static context requested */ + if ((nxi = __lookup_nx_info(id))) { + vxdprintk(VXD_CBIT(nid, 0), + "create_nx_info(%d) = %p (already there)", id, nxi); + if (nx_info_flags(nxi, NXF_STATE_SETUP, 0)) + nxi = ERR_PTR(-EBUSY); + else + nxi = ERR_PTR(-EEXIST); + goto out_unlock; + } + /* new context */ + vxdprintk(VXD_CBIT(nid, 0), + "create_nx_info(%d) = %p (new)", id, new); + claim_nx_info(new, NULL); + __nx_set_lback(new); + __hash_nx_info(get_nx_info(new)); + nxi = new, new = NULL; + +out_unlock: + spin_unlock(&nx_info_hash_lock); + if (new) + __dealloc_nx_info(new); + return nxi; +} + + + +/* exported stuff */ + + +void unhash_nx_info(struct nx_info *nxi) +{ + __shutdown_nx_info(nxi); + spin_lock(&nx_info_hash_lock); + __unhash_nx_info(nxi); + spin_unlock(&nx_info_hash_lock); +} + +/* lookup_nx_info() + + * search for a nx_info and get() it + * negative id means current */ + +struct nx_info *lookup_nx_info(int id) +{ + struct nx_info *nxi = NULL; + + if (id < 0) { + nxi = get_nx_info(current_nx_info()); + } else if (id > 1) { + spin_lock(&nx_info_hash_lock); + nxi = get_nx_info(__lookup_nx_info(id)); + spin_unlock(&nx_info_hash_lock); + } + return nxi; +} + +/* nid_is_hashed() + + * verify that nid is still hashed */ + +int nid_is_hashed(nid_t nid) +{ + int hashed; + + spin_lock(&nx_info_hash_lock); + hashed = (__lookup_nx_info(nid) != NULL); + spin_unlock(&nx_info_hash_lock); + return hashed; +} + + +#ifdef CONFIG_PROC_FS + +/* get_nid_list() + + * get a subset of hashed nids for proc + * assumes size is at least one */ + +int get_nid_list(int index, unsigned int *nids, int size) +{ + int hindex, nr_nids = 0; + + /* only show current and children */ + if (!nx_check(0, VS_ADMIN | VS_WATCH)) { + if (index > 0) + return 0; + nids[nr_nids] = nx_current_nid(); + return 1; + } + + for (hindex = 0; hindex < NX_HASH_SIZE; hindex++) { + struct hlist_head *head = &nx_info_hash[hindex]; + struct hlist_node *pos; + + spin_lock(&nx_info_hash_lock); + hlist_for_each(pos, head) { + struct nx_info *nxi; + + if (--index > 0) + continue; + + nxi = hlist_entry(pos, struct nx_info, nx_hlist); + nids[nr_nids] = nxi->nx_id; + if (++nr_nids >= size) { + spin_unlock(&nx_info_hash_lock); + goto out; + } + } + /* keep the lock time short */ + spin_unlock(&nx_info_hash_lock); + } +out: + return nr_nids; +} +#endif + + +/* + * migrate task to new network + * gets nxi, puts old_nxi on change + */ + +int nx_migrate_task(struct task_struct *p, struct nx_info *nxi) +{ + struct nx_info *old_nxi; + int ret = 0; + + if (!p || !nxi) + BUG(); + + vxdprintk(VXD_CBIT(nid, 5), + "nx_migrate_task(%p,%p[#%d.%d.%d])", + p, nxi, nxi->nx_id, + 
atomic_read(&nxi->nx_usecnt), + atomic_read(&nxi->nx_tasks)); + + if (nx_info_flags(nxi, NXF_INFO_PRIVATE, 0) && + !nx_info_flags(nxi, NXF_STATE_SETUP, 0)) + return -EACCES; + + if (nx_info_state(nxi, NXS_SHUTDOWN)) + return -EFAULT; + + /* maybe disallow this completely? */ + old_nxi = task_get_nx_info(p); + if (old_nxi == nxi) + goto out; + + task_lock(p); + if (old_nxi) + clr_nx_info(&p->nx_info); + claim_nx_info(nxi, p); + set_nx_info(&p->nx_info, nxi); + p->nid = nxi->nx_id; + task_unlock(p); + + vxdprintk(VXD_CBIT(nid, 5), + "moved task %p into nxi:%p[#%d]", + p, nxi, nxi->nx_id); + + if (old_nxi) + release_nx_info(old_nxi, p); + ret = 0; +out: + put_nx_info(old_nxi); + return ret; +} + + +void nx_set_persistent(struct nx_info *nxi) +{ + vxdprintk(VXD_CBIT(nid, 6), + "nx_set_persistent(%p[#%d])", nxi, nxi->nx_id); + + get_nx_info(nxi); + claim_nx_info(nxi, NULL); +} + +void nx_clear_persistent(struct nx_info *nxi) +{ + vxdprintk(VXD_CBIT(nid, 6), + "nx_clear_persistent(%p[#%d])", nxi, nxi->nx_id); + + release_nx_info(nxi, NULL); + put_nx_info(nxi); +} + +void nx_update_persistent(struct nx_info *nxi) +{ + if (nx_info_flags(nxi, NXF_PERSISTENT, 0)) + nx_set_persistent(nxi); + else + nx_clear_persistent(nxi); +} + +/* vserver syscall commands below here */ + +/* task nid and nx_info functions */ + +#include + + +int vc_task_nid(uint32_t id) +{ + nid_t nid; + + if (id) { + struct task_struct *tsk; + + read_lock(&tasklist_lock); + tsk = find_task_by_real_pid(id); + nid = (tsk) ? tsk->nid : -ESRCH; + read_unlock(&tasklist_lock); + } else + nid = nx_current_nid(); + return nid; +} + + +int vc_nx_info(struct nx_info *nxi, void __user *data) +{ + struct vcmd_nx_info_v0 vc_data; + + vc_data.nid = nxi->nx_id; + + if (copy_to_user(data, &vc_data, sizeof(vc_data))) + return -EFAULT; + return 0; +} + + +/* network functions */ + +int vc_net_create(uint32_t nid, void __user *data) +{ + struct vcmd_net_create vc_data = { .flagword = NXF_INIT_SET }; + struct nx_info *new_nxi; + int ret; + + if (data && copy_from_user(&vc_data, data, sizeof(vc_data))) + return -EFAULT; + + if ((nid > MAX_S_CONTEXT) || (nid < 2)) + return -EINVAL; + + new_nxi = __create_nx_info(nid); + if (IS_ERR(new_nxi)) + return PTR_ERR(new_nxi); + + /* initial flags */ + new_nxi->nx_flags = vc_data.flagword; + + ret = -ENOEXEC; + if (vs_net_change(new_nxi, VSC_NETUP)) + goto out; + + ret = nx_migrate_task(current, new_nxi); + if (ret) + goto out; + + /* return context id on success */ + ret = new_nxi->nx_id; + + /* get a reference for persistent contexts */ + if ((vc_data.flagword & NXF_PERSISTENT)) + nx_set_persistent(new_nxi); +out: + release_nx_info(new_nxi, NULL); + put_nx_info(new_nxi); + return ret; +} + + +int vc_net_migrate(struct nx_info *nxi, void __user *data) +{ + return nx_migrate_task(current, nxi); +} + + + +int do_add_v4_addr(struct nx_info *nxi, __be32 ip, __be32 ip2, __be32 mask, + uint16_t type, uint16_t flags) +{ + struct nx_addr_v4 *nxa = &nxi->v4; + + if (NX_IPV4(nxi)) { + /* locate last entry */ + for (; nxa->next; nxa = nxa->next); + nxa->next = __alloc_nx_addr_v4(); + nxa = nxa->next; + + if (IS_ERR(nxa)) + return PTR_ERR(nxa); + } + + if (nxi->v4.next) + /* drop the single-ip flag once there is an ip list */ + nxi->nx_flags &= ~NXF_SINGLE_IP; + + nxa->ip[0].s_addr = ip; + nxa->ip[1].s_addr = ip2; + nxa->mask.s_addr = mask; + nxa->type = type; + nxa->flags = flags; + return 0; +} + + +int vc_net_add(struct nx_info *nxi, void __user *data) +{ + struct vcmd_net_addr_v0 vc_data; + int index, ret = 0; + + if (data &&
copy_from_user(&vc_data, data, sizeof(vc_data))) + return -EFAULT; + + switch (vc_data.type) { + case NXA_TYPE_IPV4: + if ((vc_data.count < 1) || (vc_data.count > 4)) + return -EINVAL; + + index = 0; + while (index < vc_data.count) { + ret = do_add_v4_addr(nxi, vc_data.ip[index].s_addr, 0, + vc_data.mask[index].s_addr, NXA_TYPE_ADDR, 0); + if (ret) + return ret; + index++; + } + ret = index; + break; + + case NXA_TYPE_IPV4|NXA_MOD_BCAST: + nxi->v4_bcast = vc_data.ip[0]; + ret = 1; + break; + + case NXA_TYPE_IPV4|NXA_MOD_LBACK: + nxi->v4_lback = vc_data.ip[0]; + ret = 1; + break; + + default: + ret = -EINVAL; + break; + } + return ret; +} + +int vc_net_remove(struct nx_info *nxi, void __user *data) +{ + struct vcmd_net_addr_v0 vc_data; + + if (data && copy_from_user(&vc_data, data, sizeof(vc_data))) + return -EFAULT; + + switch (vc_data.type) { + case NXA_TYPE_ANY: + __dealloc_nx_addr_v4_all(xchg(&nxi->v4.next, NULL)); + memset(&nxi->v4, 0, sizeof(nxi->v4)); + break; + + default: + return -EINVAL; + } + return 0; +} + + +int vc_net_add_ipv4(struct nx_info *nxi, void __user *data) +{ + struct vcmd_net_addr_ipv4_v1 vc_data; + + if (data && copy_from_user(&vc_data, data, sizeof(vc_data))) + return -EFAULT; + + switch (vc_data.type) { + case NXA_TYPE_ADDR: + case NXA_TYPE_RANGE: + case NXA_TYPE_MASK: + return do_add_v4_addr(nxi, vc_data.ip.s_addr, 0, + vc_data.mask.s_addr, vc_data.type, vc_data.flags); + + case NXA_TYPE_ADDR | NXA_MOD_BCAST: + nxi->v4_bcast = vc_data.ip; + break; + + case NXA_TYPE_ADDR | NXA_MOD_LBACK: + nxi->v4_lback = vc_data.ip; + break; + + default: + return -EINVAL; + } + return 0; +} + +int vc_net_remove_ipv4(struct nx_info *nxi, void __user *data) +{ + struct vcmd_net_addr_ipv4_v1 vc_data; + + if (data && copy_from_user(&vc_data, data, sizeof(vc_data))) + return -EFAULT; + + switch (vc_data.type) { +/* case NXA_TYPE_ADDR: + break; */ + + case NXA_TYPE_ANY: + __dealloc_nx_addr_v4_all(xchg(&nxi->v4.next, NULL)); + memset(&nxi->v4, 0, sizeof(nxi->v4)); + break; + + default: + return -EINVAL; + } + return 0; +} + + +#ifdef CONFIG_IPV6 + +int do_add_v6_addr(struct nx_info *nxi, + struct in6_addr *ip, struct in6_addr *mask, + uint32_t prefix, uint16_t type, uint16_t flags) +{ + struct nx_addr_v6 *nxa = &nxi->v6; + + if (NX_IPV6(nxi)) { + /* locate last entry */ + for (; nxa->next; nxa = nxa->next); + nxa->next = __alloc_nx_addr_v6(); + nxa = nxa->next; + + if (IS_ERR(nxa)) + return PTR_ERR(nxa); + } + + nxa->ip = *ip; + nxa->mask = *mask; + nxa->prefix = prefix; + nxa->type = type; + nxa->flags = flags; + return 0; +} + + +int vc_net_add_ipv6(struct nx_info *nxi, void __user *data) +{ + struct vcmd_net_addr_ipv6_v1 vc_data; + + if (data && copy_from_user(&vc_data, data, sizeof(vc_data))) + return -EFAULT; + + switch (vc_data.type) { + case NXA_TYPE_ADDR: + case NXA_TYPE_MASK: + return do_add_v6_addr(nxi, &vc_data.ip, &vc_data.mask, + vc_data.prefix, vc_data.type, vc_data.flags); + default: + return -EINVAL; + } + return 0; +} + +int vc_net_remove_ipv6(struct nx_info *nxi, void __user *data) +{ + struct vcmd_net_addr_ipv6_v1 vc_data; + + if (data && copy_from_user(&vc_data, data, sizeof(vc_data))) + return -EFAULT; + + switch (vc_data.type) { + case NXA_TYPE_ANY: + __dealloc_nx_addr_v6_all(xchg(&nxi->v6.next, NULL)); + memset(&nxi->v6, 0, sizeof(nxi->v6)); + break; + + default: + return -EINVAL; + } + return 0; +} + +#endif /* CONFIG_IPV6 */ + + +int vc_get_nflags(struct nx_info *nxi, void __user *data) +{ + struct vcmd_net_flags_v0 vc_data; + + vc_data.flagword = 
nxi->nx_flags; + + /* special STATE flag handling */ + vc_data.mask = vs_mask_flags(~0ULL, nxi->nx_flags, NXF_ONE_TIME); + + if (copy_to_user(data, &vc_data, sizeof(vc_data))) + return -EFAULT; + return 0; +} + +int vc_set_nflags(struct nx_info *nxi, void __user *data) +{ + struct vcmd_net_flags_v0 vc_data; + uint64_t mask, trigger; + + if (copy_from_user(&vc_data, data, sizeof(vc_data))) + return -EFAULT; + + /* special STATE flag handling */ + mask = vs_mask_mask(vc_data.mask, nxi->nx_flags, NXF_ONE_TIME); + trigger = (mask & nxi->nx_flags) ^ (mask & vc_data.flagword); + + nxi->nx_flags = vs_mask_flags(nxi->nx_flags, + vc_data.flagword, mask); + if (trigger & NXF_PERSISTENT) + nx_update_persistent(nxi); + + return 0; +} + +int vc_get_ncaps(struct nx_info *nxi, void __user *data) +{ + struct vcmd_net_caps_v0 vc_data; + + vc_data.ncaps = nxi->nx_ncaps; + vc_data.cmask = ~0ULL; + + if (copy_to_user(data, &vc_data, sizeof(vc_data))) + return -EFAULT; + return 0; +} + +int vc_set_ncaps(struct nx_info *nxi, void __user *data) +{ + struct vcmd_net_caps_v0 vc_data; + + if (copy_from_user(&vc_data, data, sizeof(vc_data))) + return -EFAULT; + + nxi->nx_ncaps = vs_mask_flags(nxi->nx_ncaps, + vc_data.ncaps, vc_data.cmask); + return 0; +} + + +#include + +module_init(init_network); + +EXPORT_SYMBOL_GPL(free_nx_info); +EXPORT_SYMBOL_GPL(unhash_nx_info); + diff -Nur kernel-2.6.32.54/linux-2.6.32/kernel/vserver/proc.c kernel-2.6.32.54.vs/linux-2.6.32/kernel/vserver/proc.c --- kernel-2.6.32.54/linux-2.6.32/kernel/vserver/proc.c 1970-01-01 01:00:00.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/kernel/vserver/proc.c 2012-01-16 14:51:22.053408225 +0100 @@ -0,0 +1,1098 @@ +/* + * linux/kernel/vserver/proc.c + * + * Virtual Context Support + * + * Copyright (C) 2003-2007 Herbert Pötzl + * + * V0.01 basic structure + * V0.02 adaptation vs1.3.0 + * V0.03 proc permissions + * V0.04 locking/generic + * V0.05 next generation procfs + * V0.06 inode validation + * V0.07 generic rewrite vid + * V0.08 remove inode type + * + */ + +#include +#include +#include +#include + +#include +#include +#include + +#include +#include +#include +#include + +#include + +#include "cvirt_proc.h" +#include "cacct_proc.h" +#include "limit_proc.h" +#include "sched_proc.h" +#include "vci_config.h" + + +static inline char *print_cap_t(char *buffer, kernel_cap_t *c) +{ + unsigned __capi; + + CAP_FOR_EACH_U32(__capi) { + buffer += sprintf(buffer, "%08x", + c->cap[(_KERNEL_CAPABILITY_U32S-1) - __capi]); + } + return buffer; +} + + +static struct proc_dir_entry *proc_virtual; + +static struct proc_dir_entry *proc_virtnet; + + +/* first the actual feeds */ + + +static int proc_vci(char *buffer) +{ + return sprintf(buffer, + "VCIVersion:\t%04x:%04x\n" + "VCISyscall:\t%d\n" + "VCIKernel:\t%08x\n", + VCI_VERSION >> 16, + VCI_VERSION & 0xFFFF, + __NR_vserver, + vci_kernel_config()); +} + +static int proc_virtual_info(char *buffer) +{ + return proc_vci(buffer); +} + +static int proc_virtual_status(char *buffer) +{ + return sprintf(buffer, + "#CTotal:\t%d\n" + "#CActive:\t%d\n" + "#NSProxy:\t%d\t%d %d %d %d %d %d\n" + "#InitTask:\t%d\t%d %d\n", + atomic_read(&vx_global_ctotal), + atomic_read(&vx_global_cactive), + atomic_read(&vs_global_nsproxy), + atomic_read(&vs_global_fs), + atomic_read(&vs_global_mnt_ns), + atomic_read(&vs_global_uts_ns), + atomic_read(&nr_ipc_ns), + atomic_read(&vs_global_user_ns), + atomic_read(&vs_global_pid_ns), + atomic_read(&init_task.usage), + atomic_read(&init_task.nsproxy->count), + init_task.fs->users); +} + + 
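+/* per-context feeds: each proc_vxi_* helper below renders one file of the per-context proc directory into the single page buffer supplied by proc_vx_info_read() further down */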
+int proc_vxi_info(struct vx_info *vxi, char *buffer) +{ + int length; + + length = sprintf(buffer, + "ID:\t%d\n" + "Info:\t%p\n" + "Init:\t%d\n" + "OOM:\t%lld\n", + vxi->vx_id, + vxi, + vxi->vx_initpid, + vxi->vx_badness_bias); + return length; +} + +int proc_vxi_status(struct vx_info *vxi, char *buffer) +{ + char *orig = buffer; + + buffer += sprintf(buffer, + "UseCnt:\t%d\n" + "Tasks:\t%d\n" + "Flags:\t%016llx\n", + atomic_read(&vxi->vx_usecnt), + atomic_read(&vxi->vx_tasks), + (unsigned long long)vxi->vx_flags); + + buffer += sprintf(buffer, "BCaps:\t"); + buffer = print_cap_t(buffer, &vxi->vx_bcaps); + buffer += sprintf(buffer, "\n"); + + buffer += sprintf(buffer, + "CCaps:\t%016llx\n" + "Spaces:\t%08lx %08lx\n", + (unsigned long long)vxi->vx_ccaps, + vxi->vx_nsmask[0], vxi->vx_nsmask[1]); + return buffer - orig; +} + +int proc_vxi_limit(struct vx_info *vxi, char *buffer) +{ + return vx_info_proc_limit(&vxi->limit, buffer); +} + +int proc_vxi_sched(struct vx_info *vxi, char *buffer) +{ + int cpu, length; + + length = vx_info_proc_sched(&vxi->sched, buffer); + for_each_online_cpu(cpu) { + length += vx_info_proc_sched_pc( + &vx_per_cpu(vxi, sched_pc, cpu), + buffer + length, cpu); + } + return length; +} + +int proc_vxi_nsproxy0(struct vx_info *vxi, char *buffer) +{ + return vx_info_proc_nsproxy(vxi->vx_nsproxy[0], buffer); +} + +int proc_vxi_nsproxy1(struct vx_info *vxi, char *buffer) +{ + return vx_info_proc_nsproxy(vxi->vx_nsproxy[1], buffer); +} + +int proc_vxi_cvirt(struct vx_info *vxi, char *buffer) +{ + int cpu, length; + + vx_update_load(vxi); + length = vx_info_proc_cvirt(&vxi->cvirt, buffer); + for_each_online_cpu(cpu) { + length += vx_info_proc_cvirt_pc( + &vx_per_cpu(vxi, cvirt_pc, cpu), + buffer + length, cpu); + } + return length; +} + +int proc_vxi_cacct(struct vx_info *vxi, char *buffer) +{ + return vx_info_proc_cacct(&vxi->cacct, buffer); +} + + +static int proc_virtnet_info(char *buffer) +{ + return proc_vci(buffer); +} + +static int proc_virtnet_status(char *buffer) +{ + return sprintf(buffer, + "#CTotal:\t%d\n" + "#CActive:\t%d\n", + atomic_read(&nx_global_ctotal), + atomic_read(&nx_global_cactive)); +} + +int proc_nxi_info(struct nx_info *nxi, char *buffer) +{ + struct nx_addr_v4 *v4a; +#ifdef CONFIG_IPV6 + struct nx_addr_v6 *v6a; +#endif + int length, i; + + length = sprintf(buffer, + "ID:\t%d\n" + "Info:\t%p\n" + "Bcast:\t" NIPQUAD_FMT "\n" + "Lback:\t" NIPQUAD_FMT "\n", + nxi->nx_id, + nxi, + NIPQUAD(nxi->v4_bcast.s_addr), + NIPQUAD(nxi->v4_lback.s_addr)); + + if (!NX_IPV4(nxi)) + goto skip_v4; + for (i = 0, v4a = &nxi->v4; v4a; i++, v4a = v4a->next) + length += sprintf(buffer + length, "%d:\t" NXAV4_FMT "\n", + i, NXAV4(v4a)); +skip_v4: +#ifdef CONFIG_IPV6 + if (!NX_IPV6(nxi)) + goto skip_v6; + for (i = 0, v6a = &nxi->v6; v6a; i++, v6a = v6a->next) + length += sprintf(buffer + length, "%d:\t" NXAV6_FMT "\n", + i, NXAV6(v6a)); +skip_v6: +#endif + return length; +} + +int proc_nxi_status(struct nx_info *nxi, char *buffer) +{ + int length; + + length = sprintf(buffer, + "UseCnt:\t%d\n" + "Tasks:\t%d\n" + "Flags:\t%016llx\n" + "NCaps:\t%016llx\n", + atomic_read(&nxi->nx_usecnt), + atomic_read(&nxi->nx_tasks), + (unsigned long long)nxi->nx_flags, + (unsigned long long)nxi->nx_ncaps); + return length; +} + + + +/* here the inode helpers */ + +struct vs_entry { + int len; + char *name; + mode_t mode; + struct inode_operations *iop; + struct file_operations *fop; + union proc_op op; +}; + +static struct inode *vs_proc_make_inode(struct super_block *sb, struct vs_entry 
*p) +{ + struct inode *inode = new_inode(sb); + + if (!inode) + goto out; + + inode->i_mode = p->mode; + if (p->iop) + inode->i_op = p->iop; + if (p->fop) + inode->i_fop = p->fop; + + inode->i_nlink = (p->mode & S_IFDIR) ? 2 : 1; + inode->i_flags |= S_IMMUTABLE; + + inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME; + + inode->i_uid = 0; + inode->i_gid = 0; + inode->i_tag = 0; +out: + return inode; +} + +static struct dentry *vs_proc_instantiate(struct inode *dir, + struct dentry *dentry, int id, void *ptr) +{ + struct vs_entry *p = ptr; + struct inode *inode = vs_proc_make_inode(dir->i_sb, p); + struct dentry *error = ERR_PTR(-EINVAL); + + if (!inode) + goto out; + + PROC_I(inode)->op = p->op; + PROC_I(inode)->fd = id; + d_add(dentry, inode); + error = NULL; +out: + return error; +} + +/* Lookups */ + +typedef struct dentry *instantiate_t(struct inode *, struct dentry *, int, void *); + +/* + * Fill a directory entry. + * + * If possible create the dcache entry and derive our inode number and + * file type from the dcache entry. + * + * Since all of the proc inode numbers are dynamically generated, the inode + * numbers do not exist until the inode is cached. This means creating the + * dcache entry in readdir is necessary to keep the inode numbers + * reported by readdir in sync with the inode numbers reported + * by stat. + */ +static int proc_fill_cache(struct file *filp, void *dirent, filldir_t filldir, + char *name, int len, instantiate_t instantiate, int id, void *ptr) +{ + struct dentry *child, *dir = filp->f_dentry; + struct inode *inode; + struct qstr qname; + ino_t ino = 0; + unsigned type = DT_UNKNOWN; + + qname.name = name; + qname.len = len; + qname.hash = full_name_hash(name, len); + + child = d_lookup(dir, &qname); + if (!child) { + struct dentry *new; + new = d_alloc(dir, &qname); + if (new) { + child = instantiate(dir->d_inode, new, id, ptr); + if (child) + dput(new); + else + child = new; + } + } + if (!child || IS_ERR(child) || !child->d_inode) + goto end_instantiate; + inode = child->d_inode; + if (inode) { + ino = inode->i_ino; + type = inode->i_mode >> 12; + } + dput(child); +end_instantiate: + if (!ino) + ino = find_inode_number(dir, &qname); + if (!ino) + ino = 1; + return filldir(dirent, name, len, filp->f_pos, ino, type); +} + + + +/* get and revalidate vx_info/xid */ + +static inline +struct vx_info *get_proc_vx_info(struct inode *inode) +{ + return lookup_vx_info(PROC_I(inode)->fd); +} + +static int proc_xid_revalidate(struct dentry *dentry, struct nameidata *nd) +{ + struct inode *inode = dentry->d_inode; + xid_t xid = PROC_I(inode)->fd; + + if (!xid || xid_is_hashed(xid)) + return 1; + d_drop(dentry); + return 0; +} + + +/* get and revalidate nx_info/nid */ + +static int proc_nid_revalidate(struct dentry *dentry, struct nameidata *nd) +{ + struct inode *inode = dentry->d_inode; + nid_t nid = PROC_I(inode)->fd; + + if (!nid || nid_is_hashed(nid)) + return 1; + d_drop(dentry); + return 0; +} + + + +#define PROC_BLOCK_SIZE (PAGE_SIZE - 1024) + +static ssize_t proc_vs_info_read(struct file *file, char __user *buf, + size_t count, loff_t *ppos) +{ + struct inode *inode = file->f_dentry->d_inode; + unsigned long page; + ssize_t length = 0; + + if (count > PROC_BLOCK_SIZE) + count = PROC_BLOCK_SIZE; + + /* fade that out as soon as stable */ + WARN_ON(PROC_I(inode)->fd); + + if (!(page = __get_free_page(GFP_KERNEL))) + return -ENOMEM; + + BUG_ON(!PROC_I(inode)->op.proc_vs_read); + length = PROC_I(inode)->op.proc_vs_read((char *)page); + + if (length >= 
0) + length = simple_read_from_buffer(buf, count, ppos, + (char *)page, length); + + free_page(page); + return length; +} + +static ssize_t proc_vx_info_read(struct file *file, char __user *buf, + size_t count, loff_t *ppos) +{ + struct inode *inode = file->f_dentry->d_inode; + struct vx_info *vxi = NULL; + xid_t xid = PROC_I(inode)->fd; + unsigned long page; + ssize_t length = 0; + + if (count > PROC_BLOCK_SIZE) + count = PROC_BLOCK_SIZE; + + /* fade that out as soon as stable */ + WARN_ON(!xid); + vxi = lookup_vx_info(xid); + if (!vxi) + goto out; + + length = -ENOMEM; + if (!(page = __get_free_page(GFP_KERNEL))) + goto out_put; + + BUG_ON(!PROC_I(inode)->op.proc_vxi_read); + length = PROC_I(inode)->op.proc_vxi_read(vxi, (char *)page); + + if (length >= 0) + length = simple_read_from_buffer(buf, count, ppos, + (char *)page, length); + + free_page(page); +out_put: + put_vx_info(vxi); +out: + return length; +} + +static ssize_t proc_nx_info_read(struct file *file, char __user *buf, + size_t count, loff_t *ppos) +{ + struct inode *inode = file->f_dentry->d_inode; + struct nx_info *nxi = NULL; + nid_t nid = PROC_I(inode)->fd; + unsigned long page; + ssize_t length = 0; + + if (count > PROC_BLOCK_SIZE) + count = PROC_BLOCK_SIZE; + + /* fade that out as soon as stable */ + WARN_ON(!nid); + nxi = lookup_nx_info(nid); + if (!nxi) + goto out; + + length = -ENOMEM; + if (!(page = __get_free_page(GFP_KERNEL))) + goto out_put; + + BUG_ON(!PROC_I(inode)->op.proc_nxi_read); + length = PROC_I(inode)->op.proc_nxi_read(nxi, (char *)page); + + if (length >= 0) + length = simple_read_from_buffer(buf, count, ppos, + (char *)page, length); + + free_page(page); +out_put: + put_nx_info(nxi); +out: + return length; +} + + + +/* here comes the lower level */ + + +#define NOD(NAME, MODE, IOP, FOP, OP) { \ + .len = sizeof(NAME) - 1, \ + .name = (NAME), \ + .mode = MODE, \ + .iop = IOP, \ + .fop = FOP, \ + .op = OP, \ +} + + +#define DIR(NAME, MODE, OTYPE) \ + NOD(NAME, (S_IFDIR | (MODE)), \ + &proc_ ## OTYPE ## _inode_operations, \ + &proc_ ## OTYPE ## _file_operations, { } ) + +#define INF(NAME, MODE, OTYPE) \ + NOD(NAME, (S_IFREG | (MODE)), NULL, \ + &proc_vs_info_file_operations, \ + { .proc_vs_read = &proc_##OTYPE } ) + +#define VINF(NAME, MODE, OTYPE) \ + NOD(NAME, (S_IFREG | (MODE)), NULL, \ + &proc_vx_info_file_operations, \ + { .proc_vxi_read = &proc_##OTYPE } ) + +#define NINF(NAME, MODE, OTYPE) \ + NOD(NAME, (S_IFREG | (MODE)), NULL, \ + &proc_nx_info_file_operations, \ + { .proc_nxi_read = &proc_##OTYPE } ) + + +static struct file_operations proc_vs_info_file_operations = { + .read = proc_vs_info_read, +}; + +static struct file_operations proc_vx_info_file_operations = { + .read = proc_vx_info_read, +}; + +static struct dentry_operations proc_xid_dentry_operations = { + .d_revalidate = proc_xid_revalidate, +}; + +static struct vs_entry vx_base_stuff[] = { + VINF("info", S_IRUGO, vxi_info), + VINF("status", S_IRUGO, vxi_status), + VINF("limit", S_IRUGO, vxi_limit), + VINF("sched", S_IRUGO, vxi_sched), + VINF("nsproxy", S_IRUGO, vxi_nsproxy0), + VINF("nsproxy1",S_IRUGO, vxi_nsproxy1), + VINF("cvirt", S_IRUGO, vxi_cvirt), + VINF("cacct", S_IRUGO, vxi_cacct), + {} +}; + + + + +static struct dentry *proc_xid_instantiate(struct inode *dir, + struct dentry *dentry, int id, void *ptr) +{ + dentry->d_op = &proc_xid_dentry_operations; + return vs_proc_instantiate(dir, dentry, id, ptr); +} + +static struct dentry *proc_xid_lookup(struct inode *dir, + struct dentry *dentry, struct nameidata *nd) +{ + struct 
vs_entry *p = vx_base_stuff; + struct dentry *error = ERR_PTR(-ENOENT); + + for (; p->name; p++) { + if (p->len != dentry->d_name.len) + continue; + if (!memcmp(dentry->d_name.name, p->name, p->len)) + break; + } + if (!p->name) + goto out; + + error = proc_xid_instantiate(dir, dentry, PROC_I(dir)->fd, p); +out: + return error; +} + +static int proc_xid_readdir(struct file *filp, + void *dirent, filldir_t filldir) +{ + struct dentry *dentry = filp->f_dentry; + struct inode *inode = dentry->d_inode; + struct vs_entry *p = vx_base_stuff; + int size = sizeof(vx_base_stuff) / sizeof(struct vs_entry); + int pos, index; + u64 ino; + + pos = filp->f_pos; + switch (pos) { + case 0: + ino = inode->i_ino; + if (filldir(dirent, ".", 1, pos, ino, DT_DIR) < 0) + goto out; + pos++; + /* fall through */ + case 1: + ino = parent_ino(dentry); + if (filldir(dirent, "..", 2, pos, ino, DT_DIR) < 0) + goto out; + pos++; + /* fall through */ + default: + index = pos - 2; + if (index >= size) + goto out; + for (p += index; p->name; p++) { + if (proc_fill_cache(filp, dirent, filldir, p->name, p->len, + vs_proc_instantiate, PROC_I(inode)->fd, p)) + goto out; + pos++; + } + } +out: + filp->f_pos = pos; + return 1; +} + + + +static struct file_operations proc_nx_info_file_operations = { + .read = proc_nx_info_read, +}; + +static struct dentry_operations proc_nid_dentry_operations = { + .d_revalidate = proc_nid_revalidate, +}; + +static struct vs_entry nx_base_stuff[] = { + NINF("info", S_IRUGO, nxi_info), + NINF("status", S_IRUGO, nxi_status), + {} +}; + + +static struct dentry *proc_nid_instantiate(struct inode *dir, + struct dentry *dentry, int id, void *ptr) +{ + dentry->d_op = &proc_nid_dentry_operations; + return vs_proc_instantiate(dir, dentry, id, ptr); +} + +static struct dentry *proc_nid_lookup(struct inode *dir, + struct dentry *dentry, struct nameidata *nd) +{ + struct vs_entry *p = nx_base_stuff; + struct dentry *error = ERR_PTR(-ENOENT); + + for (; p->name; p++) { + if (p->len != dentry->d_name.len) + continue; + if (!memcmp(dentry->d_name.name, p->name, p->len)) + break; + } + if (!p->name) + goto out; + + error = proc_nid_instantiate(dir, dentry, PROC_I(dir)->fd, p); +out: + return error; +} + +static int proc_nid_readdir(struct file *filp, + void *dirent, filldir_t filldir) +{ + struct dentry *dentry = filp->f_dentry; + struct inode *inode = dentry->d_inode; + struct vs_entry *p = nx_base_stuff; + int size = sizeof(nx_base_stuff) / sizeof(struct vs_entry); + int pos, index; + u64 ino; + + pos = filp->f_pos; + switch (pos) { + case 0: + ino = inode->i_ino; + if (filldir(dirent, ".", 1, pos, ino, DT_DIR) < 0) + goto out; + pos++; + /* fall through */ + case 1: + ino = parent_ino(dentry); + if (filldir(dirent, "..", 2, pos, ino, DT_DIR) < 0) + goto out; + pos++; + /* fall through */ + default: + index = pos - 2; + if (index >= size) + goto out; + for (p += index; p->name; p++) { + if (proc_fill_cache(filp, dirent, filldir, p->name, p->len, + vs_proc_instantiate, PROC_I(inode)->fd, p)) + goto out; + pos++; + } + } +out: + filp->f_pos = pos; + return 1; +} + + +#define MAX_MULBY10 ((~0U - 9) / 10) + +static inline int atovid(const char *str, int len) +{ + int vid, c; + + vid = 0; + while (len-- > 0) { + c = *str - '0'; + str++; + if (c > 9) + return -1; + if (vid >= MAX_MULBY10) + return -1; + vid *= 10; + vid += c; + if (!vid) + return -1; + } + return vid; +} + +/* now the upper level (virtual) */ + + +static struct file_operations proc_xid_file_operations = { + .read = generic_read_dir, + .readdir = 
proc_xid_readdir, +}; + +static struct inode_operations proc_xid_inode_operations = { + .lookup = proc_xid_lookup, +}; + +static struct vs_entry vx_virtual_stuff[] = { + INF("info", S_IRUGO, virtual_info), + INF("status", S_IRUGO, virtual_status), + DIR(NULL, S_IRUGO | S_IXUGO, xid), +}; + + +static struct dentry *proc_virtual_lookup(struct inode *dir, + struct dentry *dentry, struct nameidata *nd) +{ + struct vs_entry *p = vx_virtual_stuff; + struct dentry *error = ERR_PTR(-ENOENT); + int id = 0; + + for (; p->name; p++) { + if (p->len != dentry->d_name.len) + continue; + if (!memcmp(dentry->d_name.name, p->name, p->len)) + break; + } + if (p->name) + goto instantiate; + + id = atovid(dentry->d_name.name, dentry->d_name.len); + if ((id < 0) || !xid_is_hashed(id)) + goto out; + +instantiate: + error = proc_xid_instantiate(dir, dentry, id, p); +out: + return error; +} + +static struct file_operations proc_nid_file_operations = { + .read = generic_read_dir, + .readdir = proc_nid_readdir, +}; + +static struct inode_operations proc_nid_inode_operations = { + .lookup = proc_nid_lookup, +}; + +static struct vs_entry nx_virtnet_stuff[] = { + INF("info", S_IRUGO, virtnet_info), + INF("status", S_IRUGO, virtnet_status), + DIR(NULL, S_IRUGO | S_IXUGO, nid), +}; + + +static struct dentry *proc_virtnet_lookup(struct inode *dir, + struct dentry *dentry, struct nameidata *nd) +{ + struct vs_entry *p = nx_virtnet_stuff; + struct dentry *error = ERR_PTR(-ENOENT); + int id = 0; + + for (; p->name; p++) { + if (p->len != dentry->d_name.len) + continue; + if (!memcmp(dentry->d_name.name, p->name, p->len)) + break; + } + if (p->name) + goto instantiate; + + id = atovid(dentry->d_name.name, dentry->d_name.len); + if ((id < 0) || !nid_is_hashed(id)) + goto out; + +instantiate: + error = proc_nid_instantiate(dir, dentry, id, p); +out: + return error; +} + + +#define PROC_MAXVIDS 32 + +int proc_virtual_readdir(struct file *filp, + void *dirent, filldir_t filldir) +{ + struct dentry *dentry = filp->f_dentry; + struct inode *inode = dentry->d_inode; + struct vs_entry *p = vx_virtual_stuff; + int size = sizeof(vx_virtual_stuff) / sizeof(struct vs_entry); + int pos, index; + unsigned int xid_array[PROC_MAXVIDS]; + char buf[PROC_NUMBUF]; + unsigned int nr_xids, i; + u64 ino; + + pos = filp->f_pos; + switch (pos) { + case 0: + ino = inode->i_ino; + if (filldir(dirent, ".", 1, pos, ino, DT_DIR) < 0) + goto out; + pos++; + /* fall through */ + case 1: + ino = parent_ino(dentry); + if (filldir(dirent, "..", 2, pos, ino, DT_DIR) < 0) + goto out; + pos++; + /* fall through */ + default: + index = pos - 2; + if (index >= size) + goto entries; + for (p += index; p->name; p++) { + if (proc_fill_cache(filp, dirent, filldir, p->name, p->len, + vs_proc_instantiate, 0, p)) + goto out; + pos++; + } + entries: + index = pos - size; + p = &vx_virtual_stuff[size - 1]; + nr_xids = get_xid_list(index, xid_array, PROC_MAXVIDS); + for (i = 0; i < nr_xids; i++) { + int n, xid = xid_array[i]; + unsigned int j = PROC_NUMBUF; + + n = xid; + do + buf[--j] = '0' + (n % 10); + while (n /= 10); + + if (proc_fill_cache(filp, dirent, filldir, + buf + j, PROC_NUMBUF - j, + vs_proc_instantiate, xid, p)) + goto out; + pos++; + } + } +out: + filp->f_pos = pos; + return 0; +} + +static int proc_virtual_getattr(struct vfsmount *mnt, + struct dentry *dentry, struct kstat *stat) +{ + struct inode *inode = dentry->d_inode; + + generic_fillattr(inode, stat); + stat->nlink = 2 + atomic_read(&vx_global_cactive); + return 0; +} + +static struct file_operations 
proc_virtual_dir_operations = { + .read = generic_read_dir, + .readdir = proc_virtual_readdir, +}; + +static struct inode_operations proc_virtual_dir_inode_operations = { + .getattr = proc_virtual_getattr, + .lookup = proc_virtual_lookup, +}; + + + + + +int proc_virtnet_readdir(struct file *filp, + void *dirent, filldir_t filldir) +{ + struct dentry *dentry = filp->f_dentry; + struct inode *inode = dentry->d_inode; + struct vs_entry *p = nx_virtnet_stuff; + int size = sizeof(nx_virtnet_stuff) / sizeof(struct vs_entry); + int pos, index; + unsigned int nid_array[PROC_MAXVIDS]; + char buf[PROC_NUMBUF]; + unsigned int nr_nids, i; + u64 ino; + + pos = filp->f_pos; + switch (pos) { + case 0: + ino = inode->i_ino; + if (filldir(dirent, ".", 1, pos, ino, DT_DIR) < 0) + goto out; + pos++; + /* fall through */ + case 1: + ino = parent_ino(dentry); + if (filldir(dirent, "..", 2, pos, ino, DT_DIR) < 0) + goto out; + pos++; + /* fall through */ + default: + index = pos - 2; + if (index >= size) + goto entries; + for (p += index; p->name; p++) { + if (proc_fill_cache(filp, dirent, filldir, p->name, p->len, + vs_proc_instantiate, 0, p)) + goto out; + pos++; + } + entries: + index = pos - size; + p = &nx_virtnet_stuff[size - 1]; + nr_nids = get_nid_list(index, nid_array, PROC_MAXVIDS); + for (i = 0; i < nr_nids; i++) { + int n, nid = nid_array[i]; + unsigned int j = PROC_NUMBUF; + + n = nid; + do + buf[--j] = '0' + (n % 10); + while (n /= 10); + + if (proc_fill_cache(filp, dirent, filldir, + buf + j, PROC_NUMBUF - j, + vs_proc_instantiate, nid, p)) + goto out; + pos++; + } + } +out: + filp->f_pos = pos; + return 0; +} + +static int proc_virtnet_getattr(struct vfsmount *mnt, + struct dentry *dentry, struct kstat *stat) +{ + struct inode *inode = dentry->d_inode; + + generic_fillattr(inode, stat); + stat->nlink = 2 + atomic_read(&nx_global_cactive); + return 0; +} + +static struct file_operations proc_virtnet_dir_operations = { + .read = generic_read_dir, + .readdir = proc_virtnet_readdir, +}; + +static struct inode_operations proc_virtnet_dir_inode_operations = { + .getattr = proc_virtnet_getattr, + .lookup = proc_virtnet_lookup, +}; + + + +void proc_vx_init(void) +{ + struct proc_dir_entry *ent; + + ent = proc_mkdir("virtual", 0); + if (ent) { + ent->proc_fops = &proc_virtual_dir_operations; + ent->proc_iops = &proc_virtual_dir_inode_operations; + } + proc_virtual = ent; + + ent = proc_mkdir("virtnet", 0); + if (ent) { + ent->proc_fops = &proc_virtnet_dir_operations; + ent->proc_iops = &proc_virtnet_dir_inode_operations; + } + proc_virtnet = ent; +} + + + + +/* per pid info */ + + +int proc_pid_vx_info(struct task_struct *p, char *buffer) +{ + struct vx_info *vxi; + char *orig = buffer; + + buffer += sprintf(buffer, "XID:\t%d\n", vx_task_xid(p)); + + vxi = task_get_vx_info(p); + if (!vxi) + goto out; + + buffer += sprintf(buffer, "BCaps:\t"); + buffer = print_cap_t(buffer, &vxi->vx_bcaps); + buffer += sprintf(buffer, "\n"); + buffer += sprintf(buffer, "CCaps:\t%016llx\n", + (unsigned long long)vxi->vx_ccaps); + buffer += sprintf(buffer, "CFlags:\t%016llx\n", + (unsigned long long)vxi->vx_flags); + buffer += sprintf(buffer, "CIPid:\t%d\n", vxi->vx_initpid); + + put_vx_info(vxi); +out: + return buffer - orig; +} + + +int proc_pid_nx_info(struct task_struct *p, char *buffer) +{ + struct nx_info *nxi; + struct nx_addr_v4 *v4a; +#ifdef CONFIG_IPV6 + struct nx_addr_v6 *v6a; +#endif + char *orig = buffer; + int i; + + buffer += sprintf(buffer, "NID:\t%d\n", nx_task_nid(p)); + + nxi = task_get_nx_info(p); + if 
(!nxi) + goto out; + + buffer += sprintf(buffer, "NCaps:\t%016llx\n", + (unsigned long long)nxi->nx_ncaps); + buffer += sprintf(buffer, "NFlags:\t%016llx\n", + (unsigned long long)nxi->nx_flags); + + buffer += sprintf(buffer, + "V4Root[bcast]:\t" NIPQUAD_FMT "\n", + NIPQUAD(nxi->v4_bcast.s_addr)); + buffer += sprintf(buffer, + "V4Root[lback]:\t" NIPQUAD_FMT "\n", + NIPQUAD(nxi->v4_lback.s_addr)); + if (!NX_IPV4(nxi)) + goto skip_v4; + for (i = 0, v4a = &nxi->v4; v4a; i++, v4a = v4a->next) + buffer += sprintf(buffer, "V4Root[%d]:\t" NXAV4_FMT "\n", + i, NXAV4(v4a)); +skip_v4: +#ifdef CONFIG_IPV6 + if (!NX_IPV6(nxi)) + goto skip_v6; + for (i = 0, v6a = &nxi->v6; v6a; i++, v6a = v6a->next) + buffer += sprintf(buffer, "V6Root[%d]:\t" NXAV6_FMT "\n", + i, NXAV6(v6a)); +skip_v6: +#endif + put_nx_info(nxi); +out: + return buffer - orig; +} + diff -Nur kernel-2.6.32.54/linux-2.6.32/kernel/vserver/sched.c kernel-2.6.32.54.vs/linux-2.6.32/kernel/vserver/sched.c --- kernel-2.6.32.54/linux-2.6.32/kernel/vserver/sched.c 1970-01-01 01:00:00.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/kernel/vserver/sched.c 2012-01-16 14:51:22.053408225 +0100 @@ -0,0 +1,414 @@ +/* + * linux/kernel/vserver/sched.c + * + * Virtual Server: Scheduler Support + * + * Copyright (C) 2004-2007 Herbert Pötzl + * + * V0.01 adapted Sam Vilain's version to 2.6.3 + * V0.02 removed legacy interface + * V0.03 changed vcmds to vxi arg + * V0.04 removed older and legacy interfaces + * + */ + +#include +#include +#include + +#include + + +#define vxd_check_range(val, min, max) do { \ + vxlprintk((val < min) || (val > max), \ + "check_range(%ld,%ld,%ld)", \ + (long)val, (long)min, (long)max, \ + __FILE__, __LINE__); \ + } while (0) + + +void vx_update_sched_param(struct _vx_sched *sched, + struct _vx_sched_pc *sched_pc) +{ + unsigned int set_mask = sched->update_mask; + + if (set_mask & VXSM_FILL_RATE) + sched_pc->fill_rate[0] = sched->fill_rate[0]; + if (set_mask & VXSM_INTERVAL) + sched_pc->interval[0] = sched->interval[0]; + if (set_mask & VXSM_FILL_RATE2) + sched_pc->fill_rate[1] = sched->fill_rate[1]; + if (set_mask & VXSM_INTERVAL2) + sched_pc->interval[1] = sched->interval[1]; + if (set_mask & VXSM_TOKENS) + sched_pc->tokens = sched->tokens; + if (set_mask & VXSM_TOKENS_MIN) + sched_pc->tokens_min = sched->tokens_min; + if (set_mask & VXSM_TOKENS_MAX) + sched_pc->tokens_max = sched->tokens_max; + if (set_mask & VXSM_PRIO_BIAS) + sched_pc->prio_bias = sched->prio_bias; + + if (set_mask & VXSM_IDLE_TIME) + sched_pc->flags |= VXSF_IDLE_TIME; + else + sched_pc->flags &= ~VXSF_IDLE_TIME; + + /* reset time */ + sched_pc->norm_time = jiffies; +} + + +/* + * recalculate the context's scheduling tokens + * + * ret > 0 : number of tokens available + * ret < 0 : on hold, check delta_min[] + * -1 only jiffies + * -2 also idle time + * + */ +int vx_tokens_recalc(struct _vx_sched_pc *sched_pc, + unsigned long *norm_time, unsigned long *idle_time, int delta_min[2]) +{ + long delta; + long tokens = 0; + int flags = sched_pc->flags; + + /* how much time has passed? 
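(delta is in jiffies, measured against sched_pc->norm_time from the last refill; each full interval[0] contained in delta adds fill_rate[0] tokens below) 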
*/ + delta = *norm_time - sched_pc->norm_time; + // printk("@ %ld, %ld, %ld\n", *norm_time, sched_pc->norm_time, jiffies); + vxd_check_range(delta, 0, INT_MAX); + + if (delta >= sched_pc->interval[0]) { + long tokens, integral; + + /* calc integral token part */ + tokens = delta / sched_pc->interval[0]; + integral = tokens * sched_pc->interval[0]; + tokens *= sched_pc->fill_rate[0]; +#ifdef CONFIG_VSERVER_HARDCPU + delta_min[0] = delta - integral; + vxd_check_range(delta_min[0], 0, sched_pc->interval[0]); +#endif + /* advance time */ + sched_pc->norm_time += delta; + + /* add tokens */ + sched_pc->tokens += tokens; + sched_pc->token_time += tokens; + } else + delta_min[0] = delta; + +#ifdef CONFIG_VSERVER_IDLETIME + if (!(flags & VXSF_IDLE_TIME)) + goto skip_idle; + + /* how much was the idle skip? */ + delta = *idle_time - sched_pc->idle_time; + vxd_check_range(delta, 0, INT_MAX); + + if (delta >= sched_pc->interval[1]) { + long tokens, integral; + + /* calc fair share token part */ + tokens = delta / sched_pc->interval[1]; + integral = tokens * sched_pc->interval[1]; + tokens *= sched_pc->fill_rate[1]; + delta_min[1] = delta - integral; + vxd_check_range(delta_min[1], 0, sched_pc->interval[1]); + + /* advance idle time */ + sched_pc->idle_time += integral; + + /* add tokens */ + sched_pc->tokens += tokens; + sched_pc->token_time += tokens; + } else + delta_min[1] = delta; +skip_idle: +#endif + + /* clip at maximum */ + if (sched_pc->tokens > sched_pc->tokens_max) + sched_pc->tokens = sched_pc->tokens_max; + tokens = sched_pc->tokens; + + if ((flags & VXSF_ONHOLD)) { + /* can we unhold? */ + if (tokens >= sched_pc->tokens_min) { + flags &= ~VXSF_ONHOLD; + sched_pc->hold_ticks += + *norm_time - sched_pc->onhold; + } else + goto on_hold; + } else { + /* put on hold? */ + if (tokens <= 0) { + flags |= VXSF_ONHOLD; + sched_pc->onhold = *norm_time; + goto on_hold; + } + } + sched_pc->flags = flags; + return tokens; + +on_hold: + tokens = sched_pc->tokens_min - tokens; + sched_pc->flags = flags; + // BUG_ON(tokens < 0); probably doesn't hold anymore + +#ifdef CONFIG_VSERVER_HARDCPU + /* next interval? */ + if (!sched_pc->fill_rate[0]) + delta_min[0] = HZ; + else if (tokens > sched_pc->fill_rate[0]) + delta_min[0] += sched_pc->interval[0] * + tokens / sched_pc->fill_rate[0]; + else + delta_min[0] = sched_pc->interval[0] - delta_min[0]; + vxd_check_range(delta_min[0], 0, INT_MAX); + +#ifdef CONFIG_VSERVER_IDLETIME + if (!(flags & VXSF_IDLE_TIME)) + return -1; + + /* next interval? 
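(same estimate for the idle-time bucket: jiffies until fill_rate[1] per interval[1] covers the remaining token deficit) 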
*/ + if (!sched_pc->fill_rate[1]) + delta_min[1] = HZ; + else if (tokens > sched_pc->fill_rate[1]) + delta_min[1] += sched_pc->interval[1] * + tokens / sched_pc->fill_rate[1]; + else + delta_min[1] = sched_pc->interval[1] - delta_min[1]; + vxd_check_range(delta_min[1], 0, INT_MAX); + + return -2; +#else + return -1; +#endif /* CONFIG_VSERVER_IDLETIME */ +#else + return 0; +#endif /* CONFIG_VSERVER_HARDCPU */ +} + +static inline unsigned long msec_to_ticks(unsigned long msec) +{ + return msecs_to_jiffies(msec); +} + +static inline unsigned long ticks_to_msec(unsigned long ticks) +{ + return jiffies_to_msecs(ticks); +} + +static inline unsigned long ticks_to_usec(unsigned long ticks) +{ + return jiffies_to_usecs(ticks); +} + + +static int do_set_sched(struct vx_info *vxi, struct vcmd_sched_v5 *data) +{ + unsigned int set_mask = data->mask; + unsigned int update_mask; + int i, cpu; + + /* Sanity check data values */ + if (data->tokens_max <= 0) + data->tokens_max = HZ; + if (data->tokens_min < 0) + data->tokens_min = HZ / 3; + if (data->tokens_min >= data->tokens_max) + data->tokens_min = data->tokens_max; + + if (data->prio_bias > MAX_PRIO_BIAS) + data->prio_bias = MAX_PRIO_BIAS; + if (data->prio_bias < MIN_PRIO_BIAS) + data->prio_bias = MIN_PRIO_BIAS; + + spin_lock(&vxi->sched.tokens_lock); + + /* sync up on delayed updates */ + for_each_cpu_mask(cpu, vxi->sched.update) + vx_update_sched_param(&vxi->sched, + &vx_per_cpu(vxi, sched_pc, cpu)); + + if (set_mask & VXSM_FILL_RATE) + vxi->sched.fill_rate[0] = data->fill_rate[0]; + if (set_mask & VXSM_FILL_RATE2) + vxi->sched.fill_rate[1] = data->fill_rate[1]; + if (set_mask & VXSM_INTERVAL) + vxi->sched.interval[0] = (set_mask & VXSM_MSEC) ? + msec_to_ticks(data->interval[0]) : data->interval[0]; + if (set_mask & VXSM_INTERVAL2) + vxi->sched.interval[1] = (set_mask & VXSM_MSEC) ? + msec_to_ticks(data->interval[1]) : data->interval[1]; + if (set_mask & VXSM_TOKENS) + vxi->sched.tokens = data->tokens; + if (set_mask & VXSM_TOKENS_MIN) + vxi->sched.tokens_min = data->tokens_min; + if (set_mask & VXSM_TOKENS_MAX) + vxi->sched.tokens_max = data->tokens_max; + if (set_mask & VXSM_PRIO_BIAS) + vxi->sched.prio_bias = data->prio_bias; + + /* Sanity check rate/interval */ + for (i = 0; i < 2; i++) { + if (data->fill_rate[i] < 0) + data->fill_rate[i] = 0; + if (data->interval[i] <= 0) + data->interval[i] = HZ; + } + + update_mask = vxi->sched.update_mask & VXSM_SET_MASK; + update_mask |= (set_mask & (VXSM_SET_MASK | VXSM_IDLE_TIME)); + vxi->sched.update_mask = update_mask; + +#ifdef CONFIG_SMP + rmb(); + if (set_mask & VXSM_CPU_ID) { + vxi->sched.update = cpumask_of_cpu(data->cpu_id); + cpus_and(vxi->sched.update, cpu_online_map, + vxi->sched.update); + } else + vxi->sched.update = cpu_online_map; + + /* forced reload? 
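(VXSM_FORCE pushes the pending per-cpu updates out immediately and clears the update mask, instead of leaving them as delayed updates) 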
*/ + if (set_mask & VXSM_FORCE) { + for_each_cpu_mask(cpu, vxi->sched.update) + vx_update_sched_param(&vxi->sched, + &vx_per_cpu(vxi, sched_pc, cpu)); + vxi->sched.update = CPU_MASK_NONE; + } +#else + /* on UP we update immediately */ + vx_update_sched_param(&vxi->sched, + &vx_per_cpu(vxi, sched_pc, 0)); +#endif + + spin_unlock(&vxi->sched.tokens_lock); + return 0; +} + + +#define COPY_IDS(C) C(cpu_id); C(bucket_id) +#define COPY_PRI(C) C(prio_bias) +#define COPY_TOK(C) C(tokens); C(tokens_min); C(tokens_max) +#define COPY_FRI(C) C(fill_rate[0]); C(interval[0]); \ + C(fill_rate[1]); C(interval[1]); + +#define COPY_VALUE(name) vc_data.name = data->name + +static int do_set_sched_v4(struct vx_info *vxi, struct vcmd_set_sched_v4 *data) +{ + struct vcmd_sched_v5 vc_data; + + vc_data.mask = data->set_mask; + COPY_IDS(COPY_VALUE); + COPY_PRI(COPY_VALUE); + COPY_TOK(COPY_VALUE); + vc_data.fill_rate[0] = vc_data.fill_rate[1] = data->fill_rate; + vc_data.interval[0] = vc_data.interval[1] = data->interval; + return do_set_sched(vxi, &vc_data); +} + +int vc_set_sched_v4(struct vx_info *vxi, void __user *data) +{ + struct vcmd_set_sched_v4 vc_data; + + if (copy_from_user(&vc_data, data, sizeof(vc_data))) + return -EFAULT; + + return do_set_sched_v4(vxi, &vc_data); +} + + /* latest interface is v5 */ + +int vc_set_sched(struct vx_info *vxi, void __user *data) +{ + struct vcmd_sched_v5 vc_data; + + if (copy_from_user(&vc_data, data, sizeof(vc_data))) + return -EFAULT; + + return do_set_sched(vxi, &vc_data); +} + + +#define COPY_PRI(C) C(prio_bias) +#define COPY_TOK(C) C(tokens); C(tokens_min); C(tokens_max) +#define COPY_FRI(C) C(fill_rate[0]); C(interval[0]); \ + C(fill_rate[1]); C(interval[1]); + +#define COPY_VALUE(name) vc_data.name = data->name + + +int vc_get_sched(struct vx_info *vxi, void __user *data) +{ + struct vcmd_sched_v5 vc_data; + + if (copy_from_user(&vc_data, data, sizeof(vc_data))) + return -EFAULT; + + if (vc_data.mask & VXSM_CPU_ID) { + int cpu = vc_data.cpu_id; + struct _vx_sched_pc *data; + + if (!cpu_possible(cpu)) + return -EINVAL; + + data = &vx_per_cpu(vxi, sched_pc, cpu); + COPY_TOK(COPY_VALUE); + COPY_PRI(COPY_VALUE); + COPY_FRI(COPY_VALUE); + + if (data->flags & VXSF_IDLE_TIME) + vc_data.mask |= VXSM_IDLE_TIME; + } else { + struct _vx_sched *data = &vxi->sched; + + COPY_TOK(COPY_VALUE); + COPY_PRI(COPY_VALUE); + COPY_FRI(COPY_VALUE); + } + + if (vc_data.mask & VXSM_MSEC) { + vc_data.interval[0] = ticks_to_msec(vc_data.interval[0]); + vc_data.interval[1] = ticks_to_msec(vc_data.interval[1]); + } + + if (copy_to_user(data, &vc_data, sizeof(vc_data))) + return -EFAULT; + return 0; +} + + +int vc_sched_info(struct vx_info *vxi, void __user *data) +{ + struct vcmd_sched_info vc_data; + int cpu; + + if (copy_from_user(&vc_data, data, sizeof(vc_data))) + return -EFAULT; + + cpu = vc_data.cpu_id; + if (!cpu_possible(cpu)) + return -EINVAL; + + if (vxi) { + struct _vx_sched_pc *sched_pc = + &vx_per_cpu(vxi, sched_pc, cpu); + + vc_data.user_msec = ticks_to_msec(sched_pc->user_ticks); + vc_data.sys_msec = ticks_to_msec(sched_pc->sys_ticks); + vc_data.hold_msec = ticks_to_msec(sched_pc->hold_ticks); + vc_data.vavavoom = sched_pc->vavavoom; + } + vc_data.token_usec = ticks_to_usec(1); + + if (copy_to_user(data, &vc_data, sizeof(vc_data))) + return -EFAULT; + return 0; +} + diff -Nur kernel-2.6.32.54/linux-2.6.32/kernel/vserver/sched_init.h kernel-2.6.32.54.vs/linux-2.6.32/kernel/vserver/sched_init.h --- kernel-2.6.32.54/linux-2.6.32/kernel/vserver/sched_init.h 1970-01-01 
01:00:00.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/kernel/vserver/sched_init.h 2012-01-16 14:51:22.053408225 +0100 @@ -0,0 +1,50 @@ + +static inline void vx_info_init_sched(struct _vx_sched *sched) +{ + static struct lock_class_key tokens_lock_key; + + /* scheduling; hard code starting values as constants */ + sched->fill_rate[0] = 1; + sched->interval[0] = 4; + sched->fill_rate[1] = 1; + sched->interval[1] = 8; + sched->tokens = HZ >> 2; + sched->tokens_min = HZ >> 4; + sched->tokens_max = HZ >> 1; + sched->tokens_lock = SPIN_LOCK_UNLOCKED; + sched->prio_bias = 0; + + lockdep_set_class(&sched->tokens_lock, &tokens_lock_key); +} + +static inline +void vx_info_init_sched_pc(struct _vx_sched_pc *sched_pc, int cpu) +{ + sched_pc->fill_rate[0] = 1; + sched_pc->interval[0] = 4; + sched_pc->fill_rate[1] = 1; + sched_pc->interval[1] = 8; + sched_pc->tokens = HZ >> 2; + sched_pc->tokens_min = HZ >> 4; + sched_pc->tokens_max = HZ >> 1; + sched_pc->prio_bias = 0; + sched_pc->vavavoom = 0; + sched_pc->token_time = 0; + sched_pc->idle_time = 0; + sched_pc->norm_time = jiffies; + + sched_pc->user_ticks = 0; + sched_pc->sys_ticks = 0; + sched_pc->hold_ticks = 0; +} + +static inline void vx_info_exit_sched(struct _vx_sched *sched) +{ + return; +} + +static inline +void vx_info_exit_sched_pc(struct _vx_sched_pc *sched_pc, int cpu) +{ + return; +} diff -Nur kernel-2.6.32.54/linux-2.6.32/kernel/vserver/sched_proc.h kernel-2.6.32.54.vs/linux-2.6.32/kernel/vserver/sched_proc.h --- kernel-2.6.32.54/linux-2.6.32/kernel/vserver/sched_proc.h 1970-01-01 01:00:00.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/kernel/vserver/sched_proc.h 2012-01-16 14:51:22.053408225 +0100 @@ -0,0 +1,57 @@ +#ifndef _VX_SCHED_PROC_H +#define _VX_SCHED_PROC_H + + +static inline +int vx_info_proc_sched(struct _vx_sched *sched, char *buffer) +{ + int length = 0; + + length += sprintf(buffer, + "FillRate:\t%8d,%d\n" + "Interval:\t%8d,%d\n" + "TokensMin:\t%8d\n" + "TokensMax:\t%8d\n" + "PrioBias:\t%8d\n", + sched->fill_rate[0], + sched->fill_rate[1], + sched->interval[0], + sched->interval[1], + sched->tokens_min, + sched->tokens_max, + sched->prio_bias); + return length; +} + +static inline +int vx_info_proc_sched_pc(struct _vx_sched_pc *sched_pc, + char *buffer, int cpu) +{ + int length = 0; + + length += sprintf(buffer + length, + "cpu %d: %lld %lld %lld %ld %ld", cpu, + (unsigned long long)sched_pc->user_ticks, + (unsigned long long)sched_pc->sys_ticks, + (unsigned long long)sched_pc->hold_ticks, + sched_pc->token_time, + sched_pc->idle_time); + length += sprintf(buffer + length, + " %c%c %d %d %d %d/%d %d/%d", + (sched_pc->flags & VXSF_ONHOLD) ? 'H' : 'R', + (sched_pc->flags & VXSF_IDLE_TIME) ? 
'I' : '-', + sched_pc->tokens, + sched_pc->tokens_min, + sched_pc->tokens_max, + sched_pc->fill_rate[0], + sched_pc->interval[0], + sched_pc->fill_rate[1], + sched_pc->interval[1]); + length += sprintf(buffer + length, + " %d %d\n", + sched_pc->prio_bias, + sched_pc->vavavoom); + return length; +} + +#endif /* _VX_SCHED_PROC_H */ diff -Nur kernel-2.6.32.54/linux-2.6.32/kernel/vserver/signal.c kernel-2.6.32.54.vs/linux-2.6.32/kernel/vserver/signal.c --- kernel-2.6.32.54/linux-2.6.32/kernel/vserver/signal.c 1970-01-01 01:00:00.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/kernel/vserver/signal.c 2012-01-16 14:51:22.053408225 +0100 @@ -0,0 +1,132 @@ +/* + * linux/kernel/vserver/signal.c + * + * Virtual Server: Signal Support + * + * Copyright (C) 2003-2007 Herbert Pötzl + * + * V0.01 broken out from vcontext V0.05 + * V0.02 changed vcmds to vxi arg + * V0.03 adjusted siginfo for kill + * + */ + +#include + +#include +#include +#include + + +int vx_info_kill(struct vx_info *vxi, int pid, int sig) +{ + int retval, count = 0; + struct task_struct *p; + struct siginfo *sip = SEND_SIG_PRIV; + + retval = -ESRCH; + vxdprintk(VXD_CBIT(misc, 4), + "vx_info_kill(%p[#%d],%d,%d)*", + vxi, vxi->vx_id, pid, sig); + read_lock(&tasklist_lock); + switch (pid) { + case 0: + case -1: + for_each_process(p) { + int err = 0; + + if (vx_task_xid(p) != vxi->vx_id || p->pid <= 1 || + (pid && vxi->vx_initpid == p->pid)) + continue; + + err = group_send_sig_info(sig, sip, p); + ++count; + if (err != -EPERM) + retval = err; + } + break; + + case 1: + if (vxi->vx_initpid) { + pid = vxi->vx_initpid; + /* for now, only SIGINT to private init ... */ + if (!vx_info_flags(vxi, VXF_STATE_ADMIN, 0) && + /* ... as long as there are tasks left */ + (atomic_read(&vxi->vx_tasks) > 1)) + sig = SIGINT; + } + /* fallthrough */ + default: + p = find_task_by_real_pid(pid); + if (p) { + if (vx_task_xid(p) == vxi->vx_id) + retval = group_send_sig_info(sig, sip, p); + } + break; + } + read_unlock(&tasklist_lock); + vxdprintk(VXD_CBIT(misc, 4), + "vx_info_kill(%p[#%d],%d,%d,%ld) = %d", + vxi, vxi->vx_id, pid, sig, (long)sip, retval); + return retval; +} + +int vc_ctx_kill(struct vx_info *vxi, void __user *data) +{ + struct vcmd_ctx_kill_v0 vc_data; + + if (copy_from_user(&vc_data, data, sizeof(vc_data))) + return -EFAULT; + + /* special check to allow guest shutdown */ + if (!vx_info_flags(vxi, VXF_STATE_ADMIN, 0) && + /* forbid killall pid=0 when init is present */ + (((vc_data.pid < 1) && vxi->vx_initpid) || + (vc_data.pid > 1))) + return -EACCES; + + return vx_info_kill(vxi, vc_data.pid, vc_data.sig); +} + + +static int __wait_exit(struct vx_info *vxi) +{ + DECLARE_WAITQUEUE(wait, current); + int ret = 0; + + add_wait_queue(&vxi->vx_wait, &wait); + set_current_state(TASK_INTERRUPTIBLE); + +wait: + if (vx_info_state(vxi, + VXS_SHUTDOWN | VXS_HASHED | VXS_HELPER) == VXS_SHUTDOWN) + goto out; + if (signal_pending(current)) { + ret = -ERESTARTSYS; + goto out; + } + schedule(); + goto wait; + +out: + set_current_state(TASK_RUNNING); + remove_wait_queue(&vxi->vx_wait, &wait); + return ret; +} + + + +int vc_wait_exit(struct vx_info *vxi, void __user *data) +{ + struct vcmd_wait_exit_v0 vc_data; + int ret; + + ret = __wait_exit(vxi); + vc_data.reboot_cmd = vxi->reboot_cmd; + vc_data.exit_code = vxi->exit_code; + + if (copy_to_user(data, &vc_data, sizeof(vc_data))) + ret = -EFAULT; + return ret; +} + diff -Nur kernel-2.6.32.54/linux-2.6.32/kernel/vserver/space.c kernel-2.6.32.54.vs/linux-2.6.32/kernel/vserver/space.c --- 
kernel-2.6.32.54/linux-2.6.32/kernel/vserver/space.c 1970-01-01 01:00:00.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/kernel/vserver/space.c 2012-01-16 14:51:22.053408225 +0100 @@ -0,0 +1,375 @@ +/* + * linux/kernel/vserver/space.c + * + * Virtual Server: Context Space Support + * + * Copyright (C) 2003-2007 Herbert Pötzl + * + * V0.01 broken out from context.c 0.07 + * V0.02 added task locking for namespace + * V0.03 broken out vx_enter_namespace + * V0.04 added *space support and commands + * + */ + +#include +#include +#include +#include +#include + +#include +#include +#include + +atomic_t vs_global_nsproxy = ATOMIC_INIT(0); +atomic_t vs_global_fs = ATOMIC_INIT(0); +atomic_t vs_global_mnt_ns = ATOMIC_INIT(0); +atomic_t vs_global_uts_ns = ATOMIC_INIT(0); +atomic_t vs_global_user_ns = ATOMIC_INIT(0); +atomic_t vs_global_pid_ns = ATOMIC_INIT(0); + + +/* namespace functions */ + +#include +#include +#include +#include +#include + + +static const struct vcmd_space_mask_v1 space_mask_v0 = { + .mask = CLONE_FS | + CLONE_NEWNS | + CLONE_NEWUTS | + CLONE_NEWIPC | + CLONE_NEWUSER | + 0 +}; + +static const struct vcmd_space_mask_v1 space_mask = { + .mask = CLONE_FS | + CLONE_NEWNS | + CLONE_NEWUTS | + CLONE_NEWIPC | + CLONE_NEWUSER | +#ifdef CONFIG_PID_NS + CLONE_NEWPID | +#endif +#ifdef CONFIG_NET_NS + CLONE_NEWNET | +#endif + 0 +}; + +static const struct vcmd_space_mask_v1 default_space_mask = { + .mask = CLONE_FS | + CLONE_NEWNS | + CLONE_NEWUTS | + CLONE_NEWIPC | + CLONE_NEWUSER | +#ifdef CONFIG_PID_NS +// CLONE_NEWPID | +#endif + 0 +}; + +/* + * build a new nsproxy mix + * assumes that both proxies are 'const' + * does not touch nsproxy refcounts + * will hold a reference on the result. + */ + +struct nsproxy *vs_mix_nsproxy(struct nsproxy *old_nsproxy, + struct nsproxy *new_nsproxy, unsigned long mask) +{ + struct mnt_namespace *old_ns; + struct uts_namespace *old_uts; + struct ipc_namespace *old_ipc; +#ifdef CONFIG_PID_NS + struct pid_namespace *old_pid; +#endif +#ifdef CONFIG_NET_NS + struct net *old_net; +#endif + struct nsproxy *nsproxy; + + nsproxy = copy_nsproxy(old_nsproxy); + if (!nsproxy) + goto out; + + if (mask & CLONE_NEWNS) { + old_ns = nsproxy->mnt_ns; + nsproxy->mnt_ns = new_nsproxy->mnt_ns; + if (nsproxy->mnt_ns) + get_mnt_ns(nsproxy->mnt_ns); + } else + old_ns = NULL; + + if (mask & CLONE_NEWUTS) { + old_uts = nsproxy->uts_ns; + nsproxy->uts_ns = new_nsproxy->uts_ns; + if (nsproxy->uts_ns) + get_uts_ns(nsproxy->uts_ns); + } else + old_uts = NULL; + + if (mask & CLONE_NEWIPC) { + old_ipc = nsproxy->ipc_ns; + nsproxy->ipc_ns = new_nsproxy->ipc_ns; + if (nsproxy->ipc_ns) + get_ipc_ns(nsproxy->ipc_ns); + } else + old_ipc = NULL; + +#ifdef CONFIG_PID_NS + if (mask & CLONE_NEWPID) { + old_pid = nsproxy->pid_ns; + nsproxy->pid_ns = new_nsproxy->pid_ns; + if (nsproxy->pid_ns) + get_pid_ns(nsproxy->pid_ns); + } else + old_pid = NULL; +#endif +#ifdef CONFIG_NET_NS + if (mask & CLONE_NEWNET) { + old_net = nsproxy->net_ns; + nsproxy->net_ns = new_nsproxy->net_ns; + if (nsproxy->net_ns) + get_net(nsproxy->net_ns); + } else + old_net = NULL; +#endif + if (old_ns) + put_mnt_ns(old_ns); + if (old_uts) + put_uts_ns(old_uts); + if (old_ipc) + put_ipc_ns(old_ipc); +#ifdef CONFIG_PID_NS + if (old_pid) + put_pid_ns(old_pid); +#endif +#ifdef CONFIG_NET_NS + if (old_net) + put_net(old_net); +#endif +out: + return nsproxy; +} + + +/* + * merge two nsproxy structs into a new one. + * will hold a reference on the result. 
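+ * (namespaces selected by mask are taken from the passed proxy, everything else from old; a zero mask just takes a reference on proxy and returns it unchanged)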
+ */ + +static inline +struct nsproxy *__vs_merge_nsproxy(struct nsproxy *old, + struct nsproxy *proxy, unsigned long mask) +{ + struct nsproxy null_proxy = { .mnt_ns = NULL }; + + if (!proxy) + return NULL; + + if (mask) { + /* vs_mix_nsproxy returns with reference */ + return vs_mix_nsproxy(old ? old : &null_proxy, + proxy, mask); + } + get_nsproxy(proxy); + return proxy; +} + + +int vx_enter_space(struct vx_info *vxi, unsigned long mask, unsigned index) +{ + struct nsproxy *proxy, *proxy_cur, *proxy_new; + struct fs_struct *fs_cur, *fs = NULL; + int ret, kill = 0; + + vxdprintk(VXD_CBIT(space, 8), "vx_enter_space(%p[#%u],0x%08lx,%d)", + vxi, vxi->vx_id, mask, index); + + if (vx_info_flags(vxi, VXF_INFO_PRIVATE, 0)) + return -EACCES; + + if (!mask) + mask = vxi->vx_nsmask[index]; + + if ((mask & vxi->vx_nsmask[index]) != mask) + return -EINVAL; + + if (mask & CLONE_FS) { + fs = copy_fs_struct(vxi->vx_fs[index]); + if (!fs) + return -ENOMEM; + } + proxy = vxi->vx_nsproxy[index]; + + vxdprintk(VXD_CBIT(space, 9), + "vx_enter_space(%p[#%u],0x%08lx,%d) -> (%p,%p)", + vxi, vxi->vx_id, mask, index, proxy, fs); + + task_lock(current); + fs_cur = current->fs; + + if (mask & CLONE_FS) { + write_lock(&fs_cur->lock); + current->fs = fs; + kill = !--fs_cur->users; + write_unlock(&fs_cur->lock); + } + + proxy_cur = current->nsproxy; + get_nsproxy(proxy_cur); + task_unlock(current); + + if (kill) + free_fs_struct(fs_cur); + + proxy_new = __vs_merge_nsproxy(proxy_cur, proxy, mask); + if (IS_ERR(proxy_new)) { + ret = PTR_ERR(proxy_new); + goto out_put; + } + + proxy_new = xchg(&current->nsproxy, proxy_new); + ret = 0; + + if (proxy_new) + put_nsproxy(proxy_new); +out_put: + if (proxy_cur) + put_nsproxy(proxy_cur); + return ret; +} + + +int vx_set_space(struct vx_info *vxi, unsigned long mask, unsigned index) +{ + struct nsproxy *proxy_vxi, *proxy_cur, *proxy_new; + struct fs_struct *fs_vxi, *fs; + int ret, kill = 0; + + vxdprintk(VXD_CBIT(space, 8), "vx_set_space(%p[#%u],0x%08lx,%d)", + vxi, vxi->vx_id, mask, index); +#if 0 + if (!mask) + mask = default_space_mask.mask; +#endif + if ((mask & space_mask.mask) != mask) + return -EINVAL; + + proxy_vxi = vxi->vx_nsproxy[index]; + fs_vxi = vxi->vx_fs[index]; + + if (mask & CLONE_FS) { + fs = copy_fs_struct(current->fs); + if (!fs) + return -ENOMEM; + } + + task_lock(current); + + if (mask & CLONE_FS) { + write_lock(&fs_vxi->lock); + vxi->vx_fs[index] = fs; + kill = !--fs_vxi->users; + write_unlock(&fs_vxi->lock); + } + + proxy_cur = current->nsproxy; + get_nsproxy(proxy_cur); + task_unlock(current); + + if (kill) + free_fs_struct(fs_vxi); + + proxy_new = __vs_merge_nsproxy(proxy_vxi, proxy_cur, mask); + if (IS_ERR(proxy_new)) { + ret = PTR_ERR(proxy_new); + goto out_put; + } + + proxy_new = xchg(&vxi->vx_nsproxy[index], proxy_new); + vxi->vx_nsmask[index] |= mask; + ret = 0; + + if (proxy_new) + put_nsproxy(proxy_new); +out_put: + if (proxy_cur) + put_nsproxy(proxy_cur); + return ret; +} + + +int vc_enter_space_v1(struct vx_info *vxi, void __user *data) +{ + struct vcmd_space_mask_v1 vc_data = { .mask = 0 }; + + if (data && copy_from_user(&vc_data, data, sizeof(vc_data))) + return -EFAULT; + + return vx_enter_space(vxi, vc_data.mask, 0); +} + +int vc_enter_space(struct vx_info *vxi, void __user *data) +{ + struct vcmd_space_mask_v2 vc_data = { .mask = 0 }; + + if (data && copy_from_user(&vc_data, data, sizeof(vc_data))) + return -EFAULT; + + if (vc_data.index >= VX_SPACES) + return -EINVAL; + + return vx_enter_space(vxi, vc_data.mask, vc_data.index); +} + +int 
vc_set_space_v1(struct vx_info *vxi, void __user *data) +{ + struct vcmd_space_mask_v1 vc_data = { .mask = 0 }; + + if (data && copy_from_user(&vc_data, data, sizeof(vc_data))) + return -EFAULT; + + return vx_set_space(vxi, vc_data.mask, 0); +} + +int vc_set_space(struct vx_info *vxi, void __user *data) +{ + struct vcmd_space_mask_v2 vc_data = { .mask = 0 }; + + if (data && copy_from_user(&vc_data, data, sizeof(vc_data))) + return -EFAULT; + + if (vc_data.index >= VX_SPACES) + return -EINVAL; + + return vx_set_space(vxi, vc_data.mask, vc_data.index); +} + +int vc_get_space_mask(void __user *data, int type) +{ + const struct vcmd_space_mask_v1 *mask; + + if (type == 0) + mask = &space_mask_v0; + else if (type == 1) + mask = &space_mask; + else + mask = &default_space_mask; + + vxdprintk(VXD_CBIT(space, 10), + "vc_get_space_mask(%d) = %08llx", type, mask->mask); + + if (copy_to_user(data, mask, sizeof(*mask))) + return -EFAULT; + return 0; +} + diff -Nur kernel-2.6.32.54/linux-2.6.32/kernel/vserver/switch.c kernel-2.6.32.54.vs/linux-2.6.32/kernel/vserver/switch.c --- kernel-2.6.32.54/linux-2.6.32/kernel/vserver/switch.c 1970-01-01 01:00:00.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/kernel/vserver/switch.c 2012-01-16 14:51:22.053408225 +0100 @@ -0,0 +1,546 @@ +/* + * linux/kernel/vserver/switch.c + * + * Virtual Server: Syscall Switch + * + * Copyright (C) 2003-2007 Herbert Pötzl + * + * V0.01 syscall switch + * V0.02 added signal to context + * V0.03 added rlimit functions + * V0.04 added iattr, task/xid functions + * V0.05 added debug/history stuff + * V0.06 added compat32 layer + * V0.07 vcmd args and perms + * V0.08 added status commands + * V0.09 added tag commands + * V0.10 added oom bias + * V0.11 added device commands + * + */ + +#include +#include +#include + +#include "vci_config.h" + + +static inline +int vc_get_version(uint32_t id) +{ + return VCI_VERSION; +} + +static inline +int vc_get_vci(uint32_t id) +{ + return vci_kernel_config(); +} + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + + +#ifdef CONFIG_COMPAT +#define __COMPAT(name, id, data, compat) \ + (compat) ? name ## _x32(id, data) : name(id, data) +#define __COMPAT_NO_ID(name, data, compat) \ + (compat) ? 
name ## _x32(data) : name(data) +#else +#define __COMPAT(name, id, data, compat) \ + name(id, data) +#define __COMPAT_NO_ID(name, data, compat) \ + name(data) +#endif + + +static inline +long do_vcmd(uint32_t cmd, uint32_t id, + struct vx_info *vxi, struct nx_info *nxi, + void __user *data, int compat) +{ + switch (cmd) { + + case VCMD_get_version: + return vc_get_version(id); + case VCMD_get_vci: + return vc_get_vci(id); + + case VCMD_task_xid: + return vc_task_xid(id); + case VCMD_vx_info: + return vc_vx_info(vxi, data); + + case VCMD_task_nid: + return vc_task_nid(id); + case VCMD_nx_info: + return vc_nx_info(nxi, data); + + case VCMD_task_tag: + return vc_task_tag(id); + + case VCMD_set_space_v1: + return vc_set_space_v1(vxi, data); + /* this is version 2 */ + case VCMD_set_space: + return vc_set_space(vxi, data); + + case VCMD_get_space_mask_v0: + return vc_get_space_mask(data, 0); + /* this is version 1 */ + case VCMD_get_space_mask: + return vc_get_space_mask(data, 1); + + case VCMD_get_space_default: + return vc_get_space_mask(data, -1); + +#ifdef CONFIG_IA32_EMULATION + case VCMD_get_rlimit: + return __COMPAT(vc_get_rlimit, vxi, data, compat); + case VCMD_set_rlimit: + return __COMPAT(vc_set_rlimit, vxi, data, compat); +#else + case VCMD_get_rlimit: + return vc_get_rlimit(vxi, data); + case VCMD_set_rlimit: + return vc_set_rlimit(vxi, data); +#endif + case VCMD_get_rlimit_mask: + return vc_get_rlimit_mask(id, data); + case VCMD_reset_hits: + return vc_reset_hits(vxi, data); + case VCMD_reset_minmax: + return vc_reset_minmax(vxi, data); + + case VCMD_get_vhi_name: + return vc_get_vhi_name(vxi, data); + case VCMD_set_vhi_name: + return vc_set_vhi_name(vxi, data); + + case VCMD_ctx_stat: + return vc_ctx_stat(vxi, data); + case VCMD_virt_stat: + return vc_virt_stat(vxi, data); + case VCMD_sock_stat: + return vc_sock_stat(vxi, data); + case VCMD_rlimit_stat: + return vc_rlimit_stat(vxi, data); + + case VCMD_set_cflags: + return vc_set_cflags(vxi, data); + case VCMD_get_cflags: + return vc_get_cflags(vxi, data); + + /* this is version 1 */ + case VCMD_set_ccaps: + return vc_set_ccaps(vxi, data); + /* this is version 1 */ + case VCMD_get_ccaps: + return vc_get_ccaps(vxi, data); + case VCMD_set_bcaps: + return vc_set_bcaps(vxi, data); + case VCMD_get_bcaps: + return vc_get_bcaps(vxi, data); + + case VCMD_set_badness: + return vc_set_badness(vxi, data); + case VCMD_get_badness: + return vc_get_badness(vxi, data); + + case VCMD_set_nflags: + return vc_set_nflags(nxi, data); + case VCMD_get_nflags: + return vc_get_nflags(nxi, data); + + case VCMD_set_ncaps: + return vc_set_ncaps(nxi, data); + case VCMD_get_ncaps: + return vc_get_ncaps(nxi, data); + + case VCMD_set_sched_v4: + return vc_set_sched_v4(vxi, data); + /* this is version 5 */ + case VCMD_set_sched: + return vc_set_sched(vxi, data); + case VCMD_get_sched: + return vc_get_sched(vxi, data); + case VCMD_sched_info: + return vc_sched_info(vxi, data); + + case VCMD_add_dlimit: + return __COMPAT(vc_add_dlimit, id, data, compat); + case VCMD_rem_dlimit: + return __COMPAT(vc_rem_dlimit, id, data, compat); + case VCMD_set_dlimit: + return __COMPAT(vc_set_dlimit, id, data, compat); + case VCMD_get_dlimit: + return __COMPAT(vc_get_dlimit, id, data, compat); + + case VCMD_ctx_kill: + return vc_ctx_kill(vxi, data); + + case VCMD_wait_exit: + return vc_wait_exit(vxi, data); + + case VCMD_get_iattr: + return __COMPAT_NO_ID(vc_get_iattr, data, compat); + case VCMD_set_iattr: + return __COMPAT_NO_ID(vc_set_iattr, data, compat); + + case 
VCMD_fget_iattr: + return vc_fget_iattr(id, data); + case VCMD_fset_iattr: + return vc_fset_iattr(id, data); + + case VCMD_enter_space_v0: + return vc_enter_space_v1(vxi, NULL); + case VCMD_enter_space_v1: + return vc_enter_space_v1(vxi, data); + /* this is version 2 */ + case VCMD_enter_space: + return vc_enter_space(vxi, data); + + case VCMD_ctx_create_v0: + return vc_ctx_create(id, NULL); + case VCMD_ctx_create: + return vc_ctx_create(id, data); + case VCMD_ctx_migrate_v0: + return vc_ctx_migrate(vxi, NULL); + case VCMD_ctx_migrate: + return vc_ctx_migrate(vxi, data); + + case VCMD_net_create_v0: + return vc_net_create(id, NULL); + case VCMD_net_create: + return vc_net_create(id, data); + case VCMD_net_migrate: + return vc_net_migrate(nxi, data); + + case VCMD_tag_migrate: + return vc_tag_migrate(id); + + case VCMD_net_add: + return vc_net_add(nxi, data); + case VCMD_net_remove: + return vc_net_remove(nxi, data); + + case VCMD_net_add_ipv4: + return vc_net_add_ipv4(nxi, data); + case VCMD_net_remove_ipv4: + return vc_net_remove_ipv4(nxi, data); +#ifdef CONFIG_IPV6 + case VCMD_net_add_ipv6: + return vc_net_add_ipv6(nxi, data); + case VCMD_net_remove_ipv6: + return vc_net_remove_ipv6(nxi, data); +#endif +/* case VCMD_add_match_ipv4: + return vc_add_match_ipv4(nxi, data); + case VCMD_get_match_ipv4: + return vc_get_match_ipv4(nxi, data); +#ifdef CONFIG_IPV6 + case VCMD_add_match_ipv6: + return vc_add_match_ipv6(nxi, data); + case VCMD_get_match_ipv6: + return vc_get_match_ipv6(nxi, data); +#endif */ + +#ifdef CONFIG_VSERVER_DEVICE + case VCMD_set_mapping: + return __COMPAT(vc_set_mapping, vxi, data, compat); + case VCMD_unset_mapping: + return __COMPAT(vc_unset_mapping, vxi, data, compat); +#endif +#ifdef CONFIG_VSERVER_HISTORY + case VCMD_dump_history: + return vc_dump_history(id); + case VCMD_read_history: + return __COMPAT(vc_read_history, id, data, compat); +#endif +#ifdef CONFIG_VSERVER_MONITOR + case VCMD_read_monitor: + return __COMPAT(vc_read_monitor, id, data, compat); +#endif + default: + vxwprintk_task(1, "unimplemented VCMD_%02d_%d[%d]", + VC_CATEGORY(cmd), VC_COMMAND(cmd), VC_VERSION(cmd)); + } + return -ENOSYS; +} + + +#define __VCMD(vcmd, _perm, _args, _flags) \ + case VCMD_ ## vcmd: perm = _perm; \ + args = _args; flags = _flags; break + + +#define VCA_NONE 0x00 +#define VCA_VXI 0x01 +#define VCA_NXI 0x02 + +#define VCF_NONE 0x00 +#define VCF_INFO 0x01 +#define VCF_ADMIN 0x02 +#define VCF_ARES 0x06 /* includes admin */ +#define VCF_SETUP 0x08 + +#define VCF_ZIDOK 0x10 /* zero id okay */ + + +static inline +long do_vserver(uint32_t cmd, uint32_t id, void __user *data, int compat) +{ + long ret; + int permit = -1, state = 0; + int perm = -1, args = 0, flags = 0; + struct vx_info *vxi = NULL; + struct nx_info *nxi = NULL; + + switch (cmd) { + /* unprivileged commands */ + __VCMD(get_version, 0, VCA_NONE, 0); + __VCMD(get_vci, 0, VCA_NONE, 0); + __VCMD(get_rlimit_mask, 0, VCA_NONE, 0); + __VCMD(get_space_mask_v0,0, VCA_NONE, 0); + __VCMD(get_space_mask, 0, VCA_NONE, 0); + __VCMD(get_space_default,0, VCA_NONE, 0); + + /* info commands */ + __VCMD(task_xid, 2, VCA_NONE, 0); + __VCMD(reset_hits, 2, VCA_VXI, 0); + __VCMD(reset_minmax, 2, VCA_VXI, 0); + __VCMD(vx_info, 3, VCA_VXI, VCF_INFO); + __VCMD(get_bcaps, 3, VCA_VXI, VCF_INFO); + __VCMD(get_ccaps, 3, VCA_VXI, VCF_INFO); + __VCMD(get_cflags, 3, VCA_VXI, VCF_INFO); + __VCMD(get_badness, 3, VCA_VXI, VCF_INFO); + __VCMD(get_vhi_name, 3, VCA_VXI, VCF_INFO); + __VCMD(get_rlimit, 3, VCA_VXI, VCF_INFO); + + __VCMD(ctx_stat, 3, 
VCA_VXI, VCF_INFO); + __VCMD(virt_stat, 3, VCA_VXI, VCF_INFO); + __VCMD(sock_stat, 3, VCA_VXI, VCF_INFO); + __VCMD(rlimit_stat, 3, VCA_VXI, VCF_INFO); + + __VCMD(task_nid, 2, VCA_NONE, 0); + __VCMD(nx_info, 3, VCA_NXI, VCF_INFO); + __VCMD(get_ncaps, 3, VCA_NXI, VCF_INFO); + __VCMD(get_nflags, 3, VCA_NXI, VCF_INFO); + + __VCMD(task_tag, 2, VCA_NONE, 0); + + __VCMD(get_iattr, 2, VCA_NONE, 0); + __VCMD(fget_iattr, 2, VCA_NONE, 0); + __VCMD(get_dlimit, 3, VCA_NONE, VCF_INFO); + __VCMD(get_sched, 3, VCA_VXI, VCF_INFO); + __VCMD(sched_info, 3, VCA_VXI, VCF_INFO | VCF_ZIDOK); + + /* lower admin commands */ + __VCMD(wait_exit, 4, VCA_VXI, VCF_INFO); + __VCMD(ctx_create_v0, 5, VCA_NONE, 0); + __VCMD(ctx_create, 5, VCA_NONE, 0); + __VCMD(ctx_migrate_v0, 5, VCA_VXI, VCF_ADMIN); + __VCMD(ctx_migrate, 5, VCA_VXI, VCF_ADMIN); + __VCMD(enter_space_v0, 5, VCA_VXI, VCF_ADMIN); + __VCMD(enter_space_v1, 5, VCA_VXI, VCF_ADMIN); + __VCMD(enter_space, 5, VCA_VXI, VCF_ADMIN); + + __VCMD(net_create_v0, 5, VCA_NONE, 0); + __VCMD(net_create, 5, VCA_NONE, 0); + __VCMD(net_migrate, 5, VCA_NXI, VCF_ADMIN); + + __VCMD(tag_migrate, 5, VCA_NONE, VCF_ADMIN); + + /* higher admin commands */ + __VCMD(ctx_kill, 6, VCA_VXI, VCF_ARES); + __VCMD(set_space_v1, 7, VCA_VXI, VCF_ARES | VCF_SETUP); + __VCMD(set_space, 7, VCA_VXI, VCF_ARES | VCF_SETUP); + + __VCMD(set_ccaps, 7, VCA_VXI, VCF_ARES | VCF_SETUP); + __VCMD(set_bcaps, 7, VCA_VXI, VCF_ARES | VCF_SETUP); + __VCMD(set_cflags, 7, VCA_VXI, VCF_ARES | VCF_SETUP); + __VCMD(set_badness, 7, VCA_VXI, VCF_ARES | VCF_SETUP); + + __VCMD(set_vhi_name, 7, VCA_VXI, VCF_ARES | VCF_SETUP); + __VCMD(set_rlimit, 7, VCA_VXI, VCF_ARES | VCF_SETUP); + __VCMD(set_sched, 7, VCA_VXI, VCF_ARES | VCF_SETUP); + __VCMD(set_sched_v4, 7, VCA_VXI, VCF_ARES | VCF_SETUP); + + __VCMD(set_ncaps, 7, VCA_NXI, VCF_ARES | VCF_SETUP); + __VCMD(set_nflags, 7, VCA_NXI, VCF_ARES | VCF_SETUP); + __VCMD(net_add, 8, VCA_NXI, VCF_ARES | VCF_SETUP); + __VCMD(net_remove, 8, VCA_NXI, VCF_ARES | VCF_SETUP); + __VCMD(net_add_ipv4, 8, VCA_NXI, VCF_ARES | VCF_SETUP); + __VCMD(net_remove_ipv4, 8, VCA_NXI, VCF_ARES | VCF_SETUP); +#ifdef CONFIG_IPV6 + __VCMD(net_add_ipv6, 8, VCA_NXI, VCF_ARES | VCF_SETUP); + __VCMD(net_remove_ipv6, 8, VCA_NXI, VCF_ARES | VCF_SETUP); +#endif + __VCMD(set_iattr, 7, VCA_NONE, 0); + __VCMD(fset_iattr, 7, VCA_NONE, 0); + __VCMD(set_dlimit, 7, VCA_NONE, VCF_ARES); + __VCMD(add_dlimit, 8, VCA_NONE, VCF_ARES); + __VCMD(rem_dlimit, 8, VCA_NONE, VCF_ARES); + +#ifdef CONFIG_VSERVER_DEVICE + __VCMD(set_mapping, 8, VCA_VXI, VCF_ARES|VCF_ZIDOK); + __VCMD(unset_mapping, 8, VCA_VXI, VCF_ARES|VCF_ZIDOK); +#endif + /* debug level admin commands */ +#ifdef CONFIG_VSERVER_HISTORY + __VCMD(dump_history, 9, VCA_NONE, 0); + __VCMD(read_history, 9, VCA_NONE, 0); +#endif +#ifdef CONFIG_VSERVER_MONITOR + __VCMD(read_monitor, 9, VCA_NONE, 0); +#endif + + default: + perm = -1; + } + + vxdprintk(VXD_CBIT(switch, 0), + "vc: VCMD_%02d_%d[%d], %d,%p [%d,%d,%x,%x]", + VC_CATEGORY(cmd), VC_COMMAND(cmd), + VC_VERSION(cmd), id, data, compat, + perm, args, flags); + + ret = -ENOSYS; + if (perm < 0) + goto out; + + state = 1; + if (!capable(CAP_CONTEXT)) + goto out; + + state = 2; + /* moved here from the individual commands */ + ret = -EPERM; + if ((perm > 1) && !capable(CAP_SYS_ADMIN)) + goto out; + + state = 3; + /* vcmd involves resource management */ + ret = -EPERM; + if ((flags & VCF_ARES) && !capable(CAP_SYS_RESOURCE)) + goto out; + + state = 4; + /* various legacy exceptions */ + switch (cmd) { + /* will go away when spectator 
is a cap */ + case VCMD_ctx_migrate_v0: + case VCMD_ctx_migrate: + if (id == 1) { + current->xid = 1; + ret = 1; + goto out; + } + break; + + /* will go away when spectator is a cap */ + case VCMD_net_migrate: + if (id == 1) { + current->nid = 1; + ret = 1; + goto out; + } + break; + } + + /* vcmds are fine by default */ + permit = 1; + + /* admin type vcmds require admin ... */ + if (flags & VCF_ADMIN) + permit = vx_check(0, VS_ADMIN) ? 1 : 0; + + /* ... but setup type vcmds override that */ + if (!permit && (flags & VCF_SETUP)) + permit = vx_flags(VXF_STATE_SETUP, 0) ? 2 : 0; + + state = 5; + ret = -EPERM; + if (!permit) + goto out; + + state = 6; + if (!id && (flags & VCF_ZIDOK)) + goto skip_id; + + ret = -ESRCH; + if (args & VCA_VXI) { + vxi = lookup_vx_info(id); + if (!vxi) + goto out; + + if ((flags & VCF_ADMIN) && + /* special case kill for shutdown */ + (cmd != VCMD_ctx_kill) && + /* can context be administrated? */ + !vx_info_flags(vxi, VXF_STATE_ADMIN, 0)) { + ret = -EACCES; + goto out_vxi; + } + } + state = 7; + if (args & VCA_NXI) { + nxi = lookup_nx_info(id); + if (!nxi) + goto out_vxi; + + if ((flags & VCF_ADMIN) && + /* can context be administrated? */ + !nx_info_flags(nxi, NXF_STATE_ADMIN, 0)) { + ret = -EACCES; + goto out_nxi; + } + } +skip_id: + state = 8; + ret = do_vcmd(cmd, id, vxi, nxi, data, compat); + +out_nxi: + if ((args & VCA_NXI) && nxi) + put_nx_info(nxi); +out_vxi: + if ((args & VCA_VXI) && vxi) + put_vx_info(vxi); +out: + vxdprintk(VXD_CBIT(switch, 1), + "vc: VCMD_%02d_%d[%d] = %08lx(%ld) [%d,%d]", + VC_CATEGORY(cmd), VC_COMMAND(cmd), + VC_VERSION(cmd), ret, ret, state, permit); + return ret; +} + +asmlinkage long +sys_vserver(uint32_t cmd, uint32_t id, void __user *data) +{ + return do_vserver(cmd, id, data, 0); +} + +#ifdef CONFIG_COMPAT + +asmlinkage long +sys32_vserver(uint32_t cmd, uint32_t id, void __user *data) +{ + return do_vserver(cmd, id, data, 1); +} + +#endif /* CONFIG_COMPAT */ diff -Nur kernel-2.6.32.54/linux-2.6.32/kernel/vserver/sysctl.c kernel-2.6.32.54.vs/linux-2.6.32/kernel/vserver/sysctl.c --- kernel-2.6.32.54/linux-2.6.32/kernel/vserver/sysctl.c 1970-01-01 01:00:00.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/kernel/vserver/sysctl.c 2012-01-16 14:51:22.053408225 +0100 @@ -0,0 +1,245 @@ +/* + * kernel/vserver/sysctl.c + * + * Virtual Context Support + * + * Copyright (C) 2004-2007 Herbert Pötzl + * + * V0.01 basic structure + * + */ + +#include +#include +#include +#include +#include + + +enum { + CTL_DEBUG_ERROR = 0, + CTL_DEBUG_SWITCH = 1, + CTL_DEBUG_XID, + CTL_DEBUG_NID, + CTL_DEBUG_TAG, + CTL_DEBUG_NET, + CTL_DEBUG_LIMIT, + CTL_DEBUG_CRES, + CTL_DEBUG_DLIM, + CTL_DEBUG_QUOTA, + CTL_DEBUG_CVIRT, + CTL_DEBUG_SPACE, + CTL_DEBUG_MISC, +}; + + +unsigned int vx_debug_switch = 0; +unsigned int vx_debug_xid = 0; +unsigned int vx_debug_nid = 0; +unsigned int vx_debug_tag = 0; +unsigned int vx_debug_net = 0; +unsigned int vx_debug_limit = 0; +unsigned int vx_debug_cres = 0; +unsigned int vx_debug_dlim = 0; +unsigned int vx_debug_quota = 0; +unsigned int vx_debug_cvirt = 0; +unsigned int vx_debug_space = 0; +unsigned int vx_debug_misc = 0; + + +static struct ctl_table_header *vserver_table_header; +static ctl_table vserver_root_table[]; + + +void vserver_register_sysctl(void) +{ + if (!vserver_table_header) { + vserver_table_header = register_sysctl_table(vserver_root_table); + } + +} + +void vserver_unregister_sysctl(void) +{ + if (vserver_table_header) { + unregister_sysctl_table(vserver_table_header); + vserver_table_header = NULL; + 
+	}
+}
+
+
+static int proc_dodebug(ctl_table *table, int write,
+	void __user *buffer, size_t *lenp, loff_t *ppos)
+{
+	char tmpbuf[20], *p, c;
+	unsigned int value;
+	size_t left, len;
+
+	if ((*ppos && !write) || !*lenp) {
+		*lenp = 0;
+		return 0;
+	}
+
+	left = *lenp;
+
+	if (write) {
+		if (!access_ok(VERIFY_READ, buffer, left))
+			return -EFAULT;
+		p = (char *)buffer;
+		while (left && __get_user(c, p) >= 0 && isspace(c))
+			left--, p++;
+		if (!left)
+			goto done;
+
+		if (left > sizeof(tmpbuf) - 1)
+			return -EINVAL;
+		if (copy_from_user(tmpbuf, p, left))
+			return -EFAULT;
+		tmpbuf[left] = '\0';
+
+		for (p = tmpbuf, value = 0; '0' <= *p && *p <= '9'; p++, left--)
+			value = 10 * value + (*p - '0');
+		if (*p && !isspace(*p))
+			return -EINVAL;
+		while (left && isspace(*p))
+			left--, p++;
+		*(unsigned int *)table->data = value;
+	} else {
+		if (!access_ok(VERIFY_WRITE, buffer, left))
+			return -EFAULT;
+		len = sprintf(tmpbuf, "%d", *(unsigned int *)table->data);
+		if (len > left)
+			len = left;
+		if (__copy_to_user(buffer, tmpbuf, len))
+			return -EFAULT;
+		if ((left -= len) > 0) {
+			if (put_user('\n', (char *)buffer + len))
+				return -EFAULT;
+			left--;
+		}
+	}
+
+done:
+	*lenp -= left;
+	*ppos += *lenp;
+	return 0;
+}
+
+static int zero;
+
+#define CTL_ENTRY(ctl, name) \
+	{ \
+		.ctl_name	= ctl, \
+		.procname	= #name, \
+		.data		= &vx_ ## name, \
+		.maxlen		= sizeof(int), \
+		.mode		= 0644, \
+		.proc_handler	= &proc_dodebug, \
+		.strategy	= &sysctl_intvec, \
+		.extra1		= &zero, \
+		.extra2		= &zero, \
+	}
+
+static ctl_table vserver_debug_table[] = {
+	CTL_ENTRY(CTL_DEBUG_SWITCH, debug_switch),
+	CTL_ENTRY(CTL_DEBUG_XID, debug_xid),
+	CTL_ENTRY(CTL_DEBUG_NID, debug_nid),
+	CTL_ENTRY(CTL_DEBUG_TAG, debug_tag),
+	CTL_ENTRY(CTL_DEBUG_NET, debug_net),
+	CTL_ENTRY(CTL_DEBUG_LIMIT, debug_limit),
+	CTL_ENTRY(CTL_DEBUG_CRES, debug_cres),
+	CTL_ENTRY(CTL_DEBUG_DLIM, debug_dlim),
+	CTL_ENTRY(CTL_DEBUG_QUOTA, debug_quota),
+	CTL_ENTRY(CTL_DEBUG_CVIRT, debug_cvirt),
+	CTL_ENTRY(CTL_DEBUG_SPACE, debug_space),
+	CTL_ENTRY(CTL_DEBUG_MISC, debug_misc),
+	{ .ctl_name = 0 }
+};
+
+static ctl_table vserver_root_table[] = {
+	{
+		.ctl_name = CTL_VSERVER,
+		.procname = "vserver",
+		.mode = 0555,
+		.child = vserver_debug_table
+	},
+	{ .ctl_name = 0 }
+};
+
+
+static match_table_t tokens = {
+	{ CTL_DEBUG_SWITCH, "switch=%x" },
+	{ CTL_DEBUG_XID, "xid=%x" },
+	{ CTL_DEBUG_NID, "nid=%x" },
+	{ CTL_DEBUG_TAG, "tag=%x" },
+	{ CTL_DEBUG_NET, "net=%x" },
+	{ CTL_DEBUG_LIMIT, "limit=%x" },
+	{ CTL_DEBUG_CRES, "cres=%x" },
+	{ CTL_DEBUG_DLIM, "dlim=%x" },
+	{ CTL_DEBUG_QUOTA, "quota=%x" },
+	{ CTL_DEBUG_CVIRT, "cvirt=%x" },
+	{ CTL_DEBUG_SPACE, "space=%x" },
+	{ CTL_DEBUG_MISC, "misc=%x" },
+	{ CTL_DEBUG_ERROR, NULL }
+};
+
+#define HANDLE_CASE(id, name, val) \
+	case CTL_DEBUG_ ## id: \
+		vx_debug_ ## name = val; \
+		printk("vs_debug_" #name "=0x%x\n", val); \
+		break
+
+
+static int __init vs_debug_setup(char *str)
+{
+	char *p;
+	int token;
+
+	printk("vs_debug_setup(%s)\n", str);
+	while ((p = strsep(&str, ",")) != NULL) {
+		substring_t args[MAX_OPT_ARGS];
+		unsigned int value;
+
+		if (!*p)
+			continue;
+
+		token = match_token(p, tokens, args);
+		value = (token > 0) ? simple_strtoul(args[0].from, NULL, 0) : 0;
+
+		switch (token) {
+		HANDLE_CASE(SWITCH, switch, value);
+		HANDLE_CASE(XID, xid, value);
+		HANDLE_CASE(NID, nid, value);
+		HANDLE_CASE(TAG, tag, value);
+		HANDLE_CASE(NET, net, value);
+		HANDLE_CASE(LIMIT, limit, value);
+		HANDLE_CASE(CRES, cres, value);
+		HANDLE_CASE(DLIM, dlim, value);
+		HANDLE_CASE(QUOTA, quota, value);
+		HANDLE_CASE(CVIRT, cvirt, value);
+		HANDLE_CASE(SPACE, space, value);
+		HANDLE_CASE(MISC, misc, value);
+		default:
+			return -EINVAL;
+			break;
+		}
+	}
+	return 1;
+}
+
+__setup("vsdebug=", vs_debug_setup);
+
+
+
+EXPORT_SYMBOL_GPL(vx_debug_switch);
+EXPORT_SYMBOL_GPL(vx_debug_xid);
+EXPORT_SYMBOL_GPL(vx_debug_nid);
+EXPORT_SYMBOL_GPL(vx_debug_net);
+EXPORT_SYMBOL_GPL(vx_debug_limit);
+EXPORT_SYMBOL_GPL(vx_debug_cres);
+EXPORT_SYMBOL_GPL(vx_debug_dlim);
+EXPORT_SYMBOL_GPL(vx_debug_quota);
+EXPORT_SYMBOL_GPL(vx_debug_cvirt);
+EXPORT_SYMBOL_GPL(vx_debug_space);
+EXPORT_SYMBOL_GPL(vx_debug_misc);
+
diff -Nur kernel-2.6.32.54/linux-2.6.32/kernel/vserver/tag.c kernel-2.6.32.54.vs/linux-2.6.32/kernel/vserver/tag.c
--- kernel-2.6.32.54/linux-2.6.32/kernel/vserver/tag.c 1970-01-01 01:00:00.000000000 +0100
+++ kernel-2.6.32.54.vs/linux-2.6.32/kernel/vserver/tag.c 2012-01-16 14:51:22.061408197 +0100
@@ -0,0 +1,63 @@
+/*
+ *  linux/kernel/vserver/tag.c
+ *
+ *  Virtual Server: Shallow Tag Space
+ *
+ *  Copyright (C) 2007  Herbert Pötzl
+ *
+ *  V0.01  basic implementation
+ *
+ */
+
+#include
+#include
+#include
+#include
+
+#include
+
+
+int dx_migrate_task(struct task_struct *p, tag_t tag)
+{
+	if (!p)
+		BUG();
+
+	vxdprintk(VXD_CBIT(tag, 5),
+		"dx_migrate_task(%p[#%d],#%d)", p, p->tag, tag);
+
+	task_lock(p);
+	p->tag = tag;
+	task_unlock(p);
+
+	vxdprintk(VXD_CBIT(tag, 5),
+		"moved task %p into [#%d]", p, tag);
+	return 0;
+}
+
+/* vserver syscall commands below here */
+
+/* task xid and vx_info functions */
+
+
+int vc_task_tag(uint32_t id)
+{
+	tag_t tag;
+
+	if (id) {
+		struct task_struct *tsk;
+		read_lock(&tasklist_lock);
+		tsk = find_task_by_real_pid(id);
+		tag = (tsk) ? tsk->tag : -ESRCH;
+		read_unlock(&tasklist_lock);
+	} else
+		tag = dx_current_tag();
+	return tag;
+}
+
+
+int vc_tag_migrate(uint32_t tag)
+{
+	return dx_migrate_task(current, tag & 0xFFFF);
+}
+
+
diff -Nur kernel-2.6.32.54/linux-2.6.32/kernel/vserver/vci_config.h kernel-2.6.32.54.vs/linux-2.6.32/kernel/vserver/vci_config.h
--- kernel-2.6.32.54/linux-2.6.32/kernel/vserver/vci_config.h 1970-01-01 01:00:00.000000000 +0100
+++ kernel-2.6.32.54.vs/linux-2.6.32/kernel/vserver/vci_config.h 2012-01-16 14:51:22.061408197 +0100
@@ -0,0 +1,81 @@
+
+/* interface version */
+
+#define VCI_VERSION	0x00020305
+
+
+enum {
+	VCI_KCBIT_NO_DYNAMIC = 0,
+
+	VCI_KCBIT_PROC_SECURE = 4,
+	VCI_KCBIT_HARDCPU = 5,
+	VCI_KCBIT_IDLELIMIT = 6,
+	VCI_KCBIT_IDLETIME = 7,
+
+	VCI_KCBIT_COWBL = 8,
+	VCI_KCBIT_FULLCOWBL = 9,
+	VCI_KCBIT_SPACES = 10,
+	VCI_KCBIT_NETV2 = 11,
+
+	VCI_KCBIT_DEBUG = 16,
+	VCI_KCBIT_HISTORY = 20,
+	VCI_KCBIT_TAGGED = 24,
+	VCI_KCBIT_PPTAG = 28,
+
+	VCI_KCBIT_MORE = 31,
+};
+
+
+static inline uint32_t vci_kernel_config(void)
+{
+	return
+	(1 << VCI_KCBIT_NO_DYNAMIC) |
+
+	/* configured features */
+#ifdef CONFIG_VSERVER_PROC_SECURE
+	(1 << VCI_KCBIT_PROC_SECURE) |
+#endif
+#ifdef CONFIG_VSERVER_HARDCPU
+	(1 << VCI_KCBIT_HARDCPU) |
+#endif
+#ifdef CONFIG_VSERVER_IDLELIMIT
+	(1 << VCI_KCBIT_IDLELIMIT) |
+#endif
+#ifdef CONFIG_VSERVER_IDLETIME
+	(1 << VCI_KCBIT_IDLETIME) |
+#endif
+#ifdef CONFIG_VSERVER_COWBL
+	(1 << VCI_KCBIT_COWBL) |
+	(1 << VCI_KCBIT_FULLCOWBL) |
+#endif
+	(1 << VCI_KCBIT_SPACES) |
+	(1 << VCI_KCBIT_NETV2) |
+
+	/* debug options */
+#ifdef CONFIG_VSERVER_DEBUG
+	(1 << VCI_KCBIT_DEBUG) |
+#endif
+#ifdef CONFIG_VSERVER_HISTORY
+	(1 << VCI_KCBIT_HISTORY) |
+#endif
+
+	/* inode context tagging */
+#if defined(CONFIG_TAGGING_NONE)
+	(0 << VCI_KCBIT_TAGGED) |
+#elif defined(CONFIG_TAGGING_UID16)
+	(1 << VCI_KCBIT_TAGGED) |
+#elif defined(CONFIG_TAGGING_GID16)
+	(2 << VCI_KCBIT_TAGGED) |
+#elif defined(CONFIG_TAGGING_ID24)
+	(3 << VCI_KCBIT_TAGGED) |
+#elif defined(CONFIG_TAGGING_INTERN)
+	(4 << VCI_KCBIT_TAGGED) |
+#elif defined(CONFIG_TAGGING_RUNTIME)
+	(5 << VCI_KCBIT_TAGGED) |
+#else
+	(7 << VCI_KCBIT_TAGGED) |
+#endif
+	(1 << VCI_KCBIT_PPTAG) |
+	0;
+}
+
diff -Nur kernel-2.6.32.54/linux-2.6.32/Makefile.orig kernel-2.6.32.54.vs/linux-2.6.32/Makefile.orig
--- kernel-2.6.32.54/linux-2.6.32/Makefile.orig 1970-01-01 01:00:00.000000000 +0100
+++ kernel-2.6.32.54.vs/linux-2.6.32/Makefile.orig 2012-01-16 14:47:18.000000000 +0100
@@ -0,0 +1,1598 @@
+VERSION = 2
+PATCHLEVEL = 6
+SUBLEVEL = 32
+EXTRAVERSION = .54
+NAME = Man-Eating Seals of Antiquity
+
+# *DOCUMENTATION*
+# To see a list of typical targets execute "make help"
+# More info can be located in ./README
+# Comments in this file are targeted only to the developer, do not
+# expect to learn how to build the kernel reading this file.
+
+# Do not:
+# o  use make's built-in rules and variables
+#    (this increases performance and avoids hard-to-debug behaviour);
+# o  print "Entering directory ...";
+MAKEFLAGS += -rR --no-print-directory
+
+# We are using a recursive build, so we need to do a little thinking
+# to get the ordering right.
+#
+# Most importantly: sub-Makefiles should only ever modify files in
+# their own directory. If in some directory we have a dependency on
+# a file in another dir (which doesn't happen often, but it's often
+# unavoidable when linking the built-in.o targets which finally
+# turn into vmlinux), we will call a sub make in that other dir, and
+# after that we are sure that everything which is in that other dir
+# is now up to date.
+#
+# The only cases where we need to modify files which have global
+# effects are thus separated out and done before the recursive
+# descending is started. They are now explicitly listed as the
+# prepare rule.
+
+# To put more focus on warnings, be less verbose by default
+# Use 'make V=1' to see the full commands
+
+ifeq ("$(origin V)", "command line")
+  KBUILD_VERBOSE = $(V)
+endif
+ifndef KBUILD_VERBOSE
+  KBUILD_VERBOSE = 0
+endif
+
+# Call a source code checker (by default, "sparse") as part of the
+# C compilation.
+#
+# Use 'make C=1' to enable checking of only re-compiled files.
+# Use 'make C=2' to enable checking of *all* source files, regardless
+# of whether they are re-compiled or not.
+#
+# See the file "Documentation/sparse.txt" for more details, including
+# where to get the "sparse" utility.
+
+ifeq ("$(origin C)", "command line")
+  KBUILD_CHECKSRC = $(C)
+endif
+ifndef KBUILD_CHECKSRC
+  KBUILD_CHECKSRC = 0
+endif
+
+# Use make M=dir to specify directory of external module to build
+# Old syntax make ... SUBDIRS=$PWD is still supported
+# Setting the environment variable KBUILD_EXTMOD takes precedence
+ifdef SUBDIRS
+  KBUILD_EXTMOD ?= $(SUBDIRS)
+endif
+
+ifeq ("$(origin M)", "command line")
+  KBUILD_EXTMOD := $(M)
+endif
+
+# kbuild supports saving output files in a separate directory.
+# To locate output files in a separate directory two syntaxes are supported.
+# In both cases the working directory must be the root of the kernel src.
+# 1) O=
+# Use "make O=dir/to/store/output/files/"
+#
+# 2) Set KBUILD_OUTPUT
+# Set the environment variable KBUILD_OUTPUT to point to the directory
+# where the output files shall be placed.
+# export KBUILD_OUTPUT=dir/to/store/output/files/
+# make
+#
+# The O= assignment takes precedence over the KBUILD_OUTPUT environment
+# variable.
+
+
+# KBUILD_SRC is set on invocation of make in OBJ directory
+# KBUILD_SRC is not intended to be used by the regular user (for now)
+ifeq ($(KBUILD_SRC),)
+
+# OK, Make called in directory where kernel src resides
+# Do we want to locate output files in a separate directory?
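A usage sketch of the two out-of-tree syntaxes documented above; the output path /tmp/kbuild is illustrative:

# make O=/tmp/kbuild menuconfig	/* configure into the output dir */
# make O=/tmp/kbuild		/* build there as well */

or, equivalently:

# export KBUILD_OUTPUT=/tmp/kbuild
# make

When both are given, the O= assignment wins, as noted above.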
+ifeq ("$(origin O)", "command line") + KBUILD_OUTPUT := $(O) +endif + +# That's our default target when none is given on the command line +PHONY := _all +_all: + +# Cancel implicit rules on top Makefile +$(CURDIR)/Makefile Makefile: ; + +ifneq ($(KBUILD_OUTPUT),) +# Invoke a second make in the output directory, passing relevant variables +# check that the output directory actually exists +saved-output := $(KBUILD_OUTPUT) +KBUILD_OUTPUT := $(shell cd $(KBUILD_OUTPUT) && /bin/pwd) +$(if $(KBUILD_OUTPUT),, \ + $(error output directory "$(saved-output)" does not exist)) + +PHONY += $(MAKECMDGOALS) sub-make + +$(filter-out _all sub-make $(CURDIR)/Makefile, $(MAKECMDGOALS)) _all: sub-make + $(Q)@: + +sub-make: FORCE + $(if $(KBUILD_VERBOSE:1=),@)$(MAKE) -C $(KBUILD_OUTPUT) \ + KBUILD_SRC=$(CURDIR) \ + KBUILD_EXTMOD="$(KBUILD_EXTMOD)" -f $(CURDIR)/Makefile \ + $(filter-out _all sub-make,$(MAKECMDGOALS)) + +# Leave processing to above invocation of make +skip-makefile := 1 +endif # ifneq ($(KBUILD_OUTPUT),) +endif # ifeq ($(KBUILD_SRC),) + +# We process the rest of the Makefile if this is the final invocation of make +ifeq ($(skip-makefile),) + +# If building an external module we do not care about the all: rule +# but instead _all depend on modules +PHONY += all +ifeq ($(KBUILD_EXTMOD),) +_all: all +else +_all: modules +endif + +srctree := $(if $(KBUILD_SRC),$(KBUILD_SRC),$(CURDIR)) +objtree := $(CURDIR) +src := $(srctree) +obj := $(objtree) + +VPATH := $(srctree)$(if $(KBUILD_EXTMOD),:$(KBUILD_EXTMOD)) + +export srctree objtree VPATH + + +# SUBARCH tells the usermode build what the underlying arch is. That is set +# first, and if a usermode build is happening, the "ARCH=um" on the command +# line overrides the setting of ARCH below. If a native build is happening, +# then ARCH is assigned, getting whatever value it gets normally, and +# SUBARCH is subsequently ignored. + +SUBARCH := $(shell uname -m | sed -e s/i.86/i386/ -e s/sun4u/sparc64/ \ + -e s/arm.*/arm/ -e s/sa110/arm/ \ + -e s/s390x/s390/ -e s/parisc64/parisc/ \ + -e s/ppc.*/powerpc/ -e s/mips.*/mips/ \ + -e s/sh[234].*/sh/ ) + +# Cross compiling and selecting different set of gcc/bin-utils +# --------------------------------------------------------------------------- +# +# When performing cross compilation for other architectures ARCH shall be set +# to the target architecture. (See arch/* for the possibilities). +# ARCH can be set during invocation of make: +# make ARCH=ia64 +# Another way is to have ARCH set in the environment. +# The default ARCH is the host where make is executed. + +# CROSS_COMPILE specify the prefix used for all executables used +# during compilation. Only gcc and related bin-utils executables +# are prefixed with $(CROSS_COMPILE). +# CROSS_COMPILE can be set on the command line +# make CROSS_COMPILE=ia64-linux- +# Alternatively CROSS_COMPILE can be set in the environment. 
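Cross compilation combines both variables described above; a hedged sketch, assuming an arm-linux-gnueabi- toolchain is installed on the build host:

# make ARCH=arm CROSS_COMPILE=arm-linux-gnueabi- defconfig
# make ARCH=arm CROSS_COMPILE=arm-linux-gnueabi-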
+# Default value for CROSS_COMPILE is not to prefix executables +# Note: Some architectures assign CROSS_COMPILE in their arch/*/Makefile +export KBUILD_BUILDHOST := $(SUBARCH) +ARCH ?= $(SUBARCH) +CROSS_COMPILE ?= + +# Architecture as present in compile.h +UTS_MACHINE := $(ARCH) +SRCARCH := $(ARCH) + +# Additional ARCH settings for x86 +ifeq ($(ARCH),i386) + SRCARCH := x86 +endif +ifeq ($(ARCH),x86_64) + SRCARCH := x86 +endif + +# Additional ARCH settings for sparc +ifeq ($(ARCH),sparc64) + SRCARCH := sparc +endif + +# Additional ARCH settings for sh +ifeq ($(ARCH),sh64) + SRCARCH := sh +endif + +# Where to locate arch specific headers +hdr-arch := $(SRCARCH) + +ifeq ($(ARCH),m68knommu) + hdr-arch := m68k +endif + +KCONFIG_CONFIG ?= .config + +# SHELL used by kbuild +CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \ + else if [ -x /bin/bash ]; then echo /bin/bash; \ + else echo sh; fi ; fi) + +HOSTCC = gcc +HOSTCXX = g++ +HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer +HOSTCXXFLAGS = -O2 + +# Decide whether to build built-in, modular, or both. +# Normally, just do built-in. + +KBUILD_MODULES := +KBUILD_BUILTIN := 1 + +# If we have only "make modules", don't compile built-in objects. +# When we're building modules with modversions, we need to consider +# the built-in objects during the descend as well, in order to +# make sure the checksums are up to date before we record them. + +ifeq ($(MAKECMDGOALS),modules) + KBUILD_BUILTIN := $(if $(CONFIG_MODVERSIONS),1) +endif + +# If we have "make modules", compile modules +# in addition to whatever we do anyway. +# Just "make" or "make all" shall build modules as well + +ifneq ($(filter all _all modules,$(MAKECMDGOALS)),) + KBUILD_MODULES := 1 +endif + +ifeq ($(MAKECMDGOALS),) + KBUILD_MODULES := 1 +endif + +export KBUILD_MODULES KBUILD_BUILTIN +export KBUILD_CHECKSRC KBUILD_SRC KBUILD_EXTMOD + +# Beautify output +# --------------------------------------------------------------------------- +# +# Normally, we echo the whole command before executing it. By making +# that echo $($(quiet)$(cmd)), we now have the possibility to set +# $(quiet) to choose other forms of output instead, e.g. +# +# quiet_cmd_cc_o_c = Compiling $(RELDIR)/$@ +# cmd_cc_o_c = $(CC) $(c_flags) -c -o $@ $< +# +# If $(quiet) is empty, the whole command will be printed. +# If it is set to "quiet_", only the short version will be printed. +# If it is set to "silent_", nothing will be printed at all, since +# the variable $(silent_cmd_cc_o_c) doesn't exist. +# +# A simple variant is to prefix commands with $(Q) - that's useful +# for commands that shall be hidden in non-verbose mode. +# +# $(Q)ln $@ :< +# +# If KBUILD_VERBOSE equals 0 then the above command will be hidden. +# If KBUILD_VERBOSE equals 1 then the above command is displayed. + +ifeq ($(KBUILD_VERBOSE),1) + quiet = + Q = +else + quiet=quiet_ + Q = @ +endif + +# If the user is running make -s (silent mode), suppress echoing of +# commands + +ifneq ($(findstring s,$(MAKEFLAGS)),) + quiet=silent_ +endif + +export quiet Q KBUILD_VERBOSE + + +# Look for make include files relative to root of kernel src +MAKEFLAGS += --include-dir=$(srctree) + +# We need some generic definitions (do not try to remake the file). +$(srctree)/scripts/Kbuild.include: ; +include $(srctree)/scripts/Kbuild.include + +# Make variables (CC, etc...) 
+ +AS = $(CROSS_COMPILE)as +LD = $(CROSS_COMPILE)ld +CC = $(CROSS_COMPILE)gcc +CPP = $(CC) -E +AR = $(CROSS_COMPILE)ar +NM = $(CROSS_COMPILE)nm +STRIP = $(CROSS_COMPILE)strip +OBJCOPY = $(CROSS_COMPILE)objcopy +OBJDUMP = $(CROSS_COMPILE)objdump +AWK = awk +GENKSYMS = scripts/genksyms/genksyms +INSTALLKERNEL := installkernel +DEPMOD = /sbin/depmod +KALLSYMS = scripts/kallsyms +PERL = perl +CHECK = sparse + +CHECKFLAGS := -D__linux__ -Dlinux -D__STDC__ -Dunix -D__unix__ \ + -Wbitwise -Wno-return-void $(CF) +MODFLAGS = -DMODULE +CFLAGS_MODULE = $(MODFLAGS) +AFLAGS_MODULE = $(MODFLAGS) +LDFLAGS_MODULE = -T $(srctree)/scripts/module-common.lds +CFLAGS_KERNEL = +AFLAGS_KERNEL = +CFLAGS_GCOV = -fprofile-arcs -ftest-coverage + + +# Use LINUXINCLUDE when you must reference the include/ directory. +# Needed to be compatible with the O= option +LINUXINCLUDE := -Iinclude \ + $(if $(KBUILD_SRC),-Iinclude2 -I$(srctree)/include) \ + -I$(srctree)/arch/$(hdr-arch)/include \ + -include include/linux/autoconf.h + +KBUILD_CPPFLAGS := -D__KERNEL__ + +KBUILD_CFLAGS := -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \ + -fno-strict-aliasing -fno-common \ + -Werror-implicit-function-declaration \ + -Wno-format-security \ + -fno-delete-null-pointer-checks +KBUILD_AFLAGS := -D__ASSEMBLY__ + +# Read KERNELRELEASE from include/config/kernel.release (if it exists) +KERNELRELEASE = $(shell cat include/config/kernel.release 2> /dev/null) +KERNELVERSION = $(VERSION).$(PATCHLEVEL).$(SUBLEVEL)$(EXTRAVERSION) + +export VERSION PATCHLEVEL SUBLEVEL KERNELRELEASE KERNELVERSION +export ARCH SRCARCH CONFIG_SHELL HOSTCC HOSTCFLAGS CROSS_COMPILE AS LD CC +export CPP AR NM STRIP OBJCOPY OBJDUMP +export MAKE AWK GENKSYMS INSTALLKERNEL PERL UTS_MACHINE +export HOSTCXX HOSTCXXFLAGS LDFLAGS_MODULE CHECK CHECKFLAGS + +export KBUILD_CPPFLAGS NOSTDINC_FLAGS LINUXINCLUDE OBJCOPYFLAGS LDFLAGS +export KBUILD_CFLAGS CFLAGS_KERNEL CFLAGS_MODULE CFLAGS_GCOV +export KBUILD_AFLAGS AFLAGS_KERNEL AFLAGS_MODULE + +# When compiling out-of-tree modules, put MODVERDIR in the module +# tree rather than in the kernel tree. The kernel tree might +# even be read-only. +export MODVERDIR := $(if $(KBUILD_EXTMOD),$(firstword $(KBUILD_EXTMOD))/).tmp_versions + +# Files to ignore in find ... statements + +RCS_FIND_IGNORE := \( -name SCCS -o -name BitKeeper -o -name .svn -o -name CVS -o -name .pc -o -name .hg -o -name .git \) -prune -o +export RCS_TAR_IGNORE := --exclude SCCS --exclude BitKeeper --exclude .svn --exclude CVS --exclude .pc --exclude .hg --exclude .git + +# =========================================================================== +# Rules shared between *config targets and build targets + +# Basic helpers built in scripts/ +PHONY += scripts_basic +scripts_basic: + $(Q)$(MAKE) $(build)=scripts/basic + +# To avoid any implicit rule to kick in, define an empty command. +scripts/basic/%: scripts_basic ; + +PHONY += outputmakefile +# outputmakefile generates a Makefile in the output directory, if using a +# separate output directory. This allows convenient use of make in the +# output directory. +outputmakefile: +ifneq ($(KBUILD_SRC),) + $(Q)ln -fsn $(srctree) source + $(Q)$(CONFIG_SHELL) $(srctree)/scripts/mkmakefile \ + $(srctree) $(objtree) $(VERSION) $(PATCHLEVEL) +endif + +# To make sure we do not include .config for any of the *config targets +# catch them early, and hand them over to scripts/kconfig/Makefile +# It is allowed to specify more targets when calling make, including +# mixing *config targets and build targets. 
+# For example 'make oldconfig all'. +# Detect when mixed targets is specified, and make a second invocation +# of make so .config is not included in this case either (for *config). + +no-dot-config-targets := clean mrproper distclean \ + cscope TAGS tags help %docs check% \ + include/linux/version.h headers_% \ + kernelrelease kernelversion + +config-targets := 0 +mixed-targets := 0 +dot-config := 1 + +ifneq ($(filter $(no-dot-config-targets), $(MAKECMDGOALS)),) + ifeq ($(filter-out $(no-dot-config-targets), $(MAKECMDGOALS)),) + dot-config := 0 + endif +endif + +ifeq ($(KBUILD_EXTMOD),) + ifneq ($(filter config %config,$(MAKECMDGOALS)),) + config-targets := 1 + ifneq ($(filter-out config %config,$(MAKECMDGOALS)),) + mixed-targets := 1 + endif + endif +endif + +ifeq ($(mixed-targets),1) +# =========================================================================== +# We're called with mixed targets (*config and build targets). +# Handle them one by one. + +%:: FORCE + $(Q)$(MAKE) -C $(srctree) KBUILD_SRC= $@ + +else +ifeq ($(config-targets),1) +# =========================================================================== +# *config targets only - make sure prerequisites are updated, and descend +# in scripts/kconfig to make the *config target + +# Read arch specific Makefile to set KBUILD_DEFCONFIG as needed. +# KBUILD_DEFCONFIG may point out an alternative default configuration +# used for 'make defconfig' +include $(srctree)/arch/$(SRCARCH)/Makefile +export KBUILD_DEFCONFIG KBUILD_KCONFIG + +config: scripts_basic outputmakefile FORCE + $(Q)mkdir -p include/linux include/config + $(Q)$(MAKE) $(build)=scripts/kconfig $@ + +%config: scripts_basic outputmakefile FORCE + $(Q)mkdir -p include/linux include/config + $(Q)$(MAKE) $(build)=scripts/kconfig $@ + +else +# =========================================================================== +# Build targets only - this includes vmlinux, arch specific targets, clean +# targets and others. In general all targets except *config targets. + +ifeq ($(KBUILD_EXTMOD),) +# Additional helpers built in scripts/ +# Carefully list dependencies so we do not try to build scripts twice +# in parallel +PHONY += scripts +scripts: scripts_basic include/config/auto.conf + $(Q)$(MAKE) $(build)=$(@) + +# Objects we will link into vmlinux / subdirs we need to visit +init-y := init/ +drivers-y := drivers/ sound/ firmware/ +net-y := net/ +libs-y := lib/ +core-y := usr/ +endif # KBUILD_EXTMOD + +ifeq ($(dot-config),1) +# Read in config +-include include/config/auto.conf + +ifeq ($(KBUILD_EXTMOD),) +# Read in dependencies to all Kconfig* files, make sure to run +# oldconfig if changes are detected. +-include include/config/auto.conf.cmd + +# To avoid any implicit rule to kick in, define an empty command +$(KCONFIG_CONFIG) include/config/auto.conf.cmd: ; + +# If .config is newer than include/config/auto.conf, someone tinkered +# with it and forgot to run make oldconfig. +# if auto.conf.cmd is missing then we are probably in a cleaned tree so +# we execute the config step to be sure to catch updated Kconfig files +include/config/auto.conf: $(KCONFIG_CONFIG) include/config/auto.conf.cmd + $(Q)$(MAKE) -f $(srctree)/Makefile silentoldconfig +else +# external modules needs include/linux/autoconf.h and include/config/auto.conf +# but do not care if they are up-to-date. 
Use auto.conf to trigger the test +PHONY += include/config/auto.conf + +include/config/auto.conf: + $(Q)test -e include/linux/autoconf.h -a -e $@ || ( \ + echo; \ + echo " ERROR: Kernel configuration is invalid."; \ + echo " include/linux/autoconf.h or $@ are missing."; \ + echo " Run 'make oldconfig && make prepare' on kernel src to fix it."; \ + echo; \ + /bin/false) + +endif # KBUILD_EXTMOD + +else +# Dummy target needed, because used as prerequisite +include/config/auto.conf: ; +endif # $(dot-config) + +# The all: target is the default when no target is given on the +# command line. +# This allow a user to issue only 'make' to build a kernel including modules +# Defaults vmlinux but it is usually overridden in the arch makefile +all: vmlinux + +ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE +KBUILD_CFLAGS += -Os +else +KBUILD_CFLAGS += -O2 +endif + +include $(srctree)/arch/$(SRCARCH)/Makefile + +ifneq ($(CONFIG_FRAME_WARN),0) +KBUILD_CFLAGS += $(call cc-option,-Wframe-larger-than=${CONFIG_FRAME_WARN}) +endif + +# Force gcc to behave correct even for buggy distributions +ifndef CONFIG_CC_STACKPROTECTOR +KBUILD_CFLAGS += $(call cc-option, -fno-stack-protector) +endif + +# This warning generated too much noise in a regular build. +KBUILD_CFLAGS += $(call cc-disable-warning, unused-but-set-variable) + +ifdef CONFIG_FRAME_POINTER +KBUILD_CFLAGS += -fno-omit-frame-pointer -fno-optimize-sibling-calls +else +KBUILD_CFLAGS += -fomit-frame-pointer +endif + +ifdef CONFIG_DEBUG_INFO +KBUILD_CFLAGS += -g +KBUILD_AFLAGS += -gdwarf-2 +endif + +ifdef CONFIG_FUNCTION_TRACER +KBUILD_CFLAGS += -pg +endif + +# We trigger additional mismatches with less inlining +ifdef CONFIG_DEBUG_SECTION_MISMATCH +KBUILD_CFLAGS += $(call cc-option, -fno-inline-functions-called-once) +endif + +# arch Makefile may override CC so keep this after arch Makefile is included +NOSTDINC_FLAGS += -nostdinc -isystem $(shell $(CC) -print-file-name=include) +CHECKFLAGS += $(NOSTDINC_FLAGS) + +# warn about C99 declaration after statement +KBUILD_CFLAGS += $(call cc-option,-Wdeclaration-after-statement,) + +# disable pointer signed / unsigned warnings in gcc 4.0 +KBUILD_CFLAGS += $(call cc-disable-warning, pointer-sign) + +# disable invalid "can't wrap" optimizations for signed / pointers +KBUILD_CFLAGS += $(call cc-option,-fno-strict-overflow) + +# revert to pre-gcc-4.4 behaviour of .eh_frame +KBUILD_CFLAGS += $(call cc-option,-fno-dwarf2-cfi-asm) + +# conserve stack if available +KBUILD_CFLAGS += $(call cc-option,-fconserve-stack) + +# Add user supplied CPPFLAGS, AFLAGS and CFLAGS as the last assignments +# But warn user when we do so +warn-assign = \ +$(warning "WARNING: Appending $$K$(1) ($(K$(1))) from $(origin K$(1)) to kernel $$$(1)") + +ifneq ($(KCPPFLAGS),) + $(call warn-assign,CPPFLAGS) + KBUILD_CPPFLAGS += $(KCPPFLAGS) +endif +ifneq ($(KAFLAGS),) + $(call warn-assign,AFLAGS) + KBUILD_AFLAGS += $(KAFLAGS) +endif +ifneq ($(KCFLAGS),) + $(call warn-assign,CFLAGS) + KBUILD_CFLAGS += $(KCFLAGS) +endif + +# Use --build-id when available. +LDFLAGS_BUILD_ID = $(patsubst -Wl$(comma)%,%,\ + $(call cc-ldoption, -Wl$(comma)--build-id,)) +LDFLAGS_MODULE += $(LDFLAGS_BUILD_ID) +LDFLAGS_vmlinux += $(LDFLAGS_BUILD_ID) + +ifeq ($(CONFIG_STRIP_ASM_SYMS),y) +LDFLAGS_vmlinux += $(call ld-option, -X,) +endif + +# Default kernel image to build when no specific target is given. 
+# KBUILD_IMAGE may be overruled on the command line or +# set in the environment +# Also any assignments in arch/$(ARCH)/Makefile take precedence over +# this default value +export KBUILD_IMAGE ?= vmlinux + +# +# INSTALL_PATH specifies where to place the updated kernel and system map +# images. Default is /boot, but you can set it to other values +export INSTALL_PATH ?= /boot + +# +# INSTALL_MOD_PATH specifies a prefix to MODLIB for module directory +# relocations required by build roots. This is not defined in the +# makefile but the argument can be passed to make if needed. +# + +MODLIB = $(INSTALL_MOD_PATH)/lib/modules/$(KERNELRELEASE) +export MODLIB + +# +# INSTALL_MOD_STRIP, if defined, will cause modules to be +# stripped after they are installed. If INSTALL_MOD_STRIP is '1', then +# the default option --strip-debug will be used. Otherwise, +# INSTALL_MOD_STRIP will used as the options to the strip command. + +ifdef INSTALL_MOD_STRIP +ifeq ($(INSTALL_MOD_STRIP),1) +mod_strip_cmd = $(STRIP) --strip-debug +else +mod_strip_cmd = $(STRIP) $(INSTALL_MOD_STRIP) +endif # INSTALL_MOD_STRIP=1 +else +mod_strip_cmd = true +endif # INSTALL_MOD_STRIP +export mod_strip_cmd + + +ifeq ($(KBUILD_EXTMOD),) +core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ + +vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \ + $(core-y) $(core-m) $(drivers-y) $(drivers-m) \ + $(net-y) $(net-m) $(libs-y) $(libs-m))) + +vmlinux-alldirs := $(sort $(vmlinux-dirs) $(patsubst %/,%,$(filter %/, \ + $(init-n) $(init-) \ + $(core-n) $(core-) $(drivers-n) $(drivers-) \ + $(net-n) $(net-) $(libs-n) $(libs-)))) + +init-y := $(patsubst %/, %/built-in.o, $(init-y)) +core-y := $(patsubst %/, %/built-in.o, $(core-y)) +drivers-y := $(patsubst %/, %/built-in.o, $(drivers-y)) +net-y := $(patsubst %/, %/built-in.o, $(net-y)) +libs-y1 := $(patsubst %/, %/lib.a, $(libs-y)) +libs-y2 := $(patsubst %/, %/built-in.o, $(libs-y)) +libs-y := $(libs-y1) $(libs-y2) + +# Build vmlinux +# --------------------------------------------------------------------------- +# vmlinux is built from the objects selected by $(vmlinux-init) and +# $(vmlinux-main). Most are built-in.o files from top-level directories +# in the kernel tree, others are specified in arch/$(ARCH)/Makefile. +# Ordering when linking is important, and $(vmlinux-init) must be first. +# +# vmlinux +# ^ +# | +# +-< $(vmlinux-init) +# | +--< init/version.o + more +# | +# +--< $(vmlinux-main) +# | +--< driver/built-in.o mm/built-in.o + more +# | +# +-< kallsyms.o (see description in CONFIG_KALLSYMS section) +# +# vmlinux version (uname -v) cannot be updated during normal +# descending-into-subdirs phase since we do not yet know if we need to +# update vmlinux. +# Therefore this step is delayed until just before final link of vmlinux - +# except in the kallsyms case where it is done just before adding the +# symbols to the kernel. 
+# +# System.map is generated to document addresses of all kernel symbols + +vmlinux-init := $(head-y) $(init-y) +vmlinux-main := $(core-y) $(libs-y) $(drivers-y) $(net-y) +vmlinux-all := $(vmlinux-init) $(vmlinux-main) +vmlinux-lds := arch/$(SRCARCH)/kernel/vmlinux.lds +export KBUILD_VMLINUX_OBJS := $(vmlinux-all) + +# Rule to link vmlinux - also used during CONFIG_KALLSYMS +# May be overridden by arch/$(ARCH)/Makefile +quiet_cmd_vmlinux__ ?= LD $@ + cmd_vmlinux__ ?= $(LD) $(LDFLAGS) $(LDFLAGS_vmlinux) -o $@ \ + -T $(vmlinux-lds) $(vmlinux-init) \ + --start-group $(vmlinux-main) --end-group \ + $(filter-out $(vmlinux-lds) $(vmlinux-init) $(vmlinux-main) vmlinux.o FORCE ,$^) + +# Generate new vmlinux version +quiet_cmd_vmlinux_version = GEN .version + cmd_vmlinux_version = set -e; \ + if [ ! -r .version ]; then \ + rm -f .version; \ + echo 1 >.version; \ + else \ + mv .version .old_version; \ + expr 0$$(cat .old_version) + 1 >.version; \ + fi; \ + $(MAKE) $(build)=init + +# Generate System.map +quiet_cmd_sysmap = SYSMAP + cmd_sysmap = $(CONFIG_SHELL) $(srctree)/scripts/mksysmap + +# Link of vmlinux +# If CONFIG_KALLSYMS is set .version is already updated +# Generate System.map and verify that the content is consistent +# Use + in front of the vmlinux_version rule to silent warning with make -j2 +# First command is ':' to allow us to use + in front of the rule +define rule_vmlinux__ + : + $(if $(CONFIG_KALLSYMS),,+$(call cmd,vmlinux_version)) + + $(call cmd,vmlinux__) + $(Q)echo 'cmd_$@ := $(cmd_vmlinux__)' > $(@D)/.$(@F).cmd + + $(Q)$(if $($(quiet)cmd_sysmap), \ + echo ' $($(quiet)cmd_sysmap) System.map' &&) \ + $(cmd_sysmap) $@ System.map; \ + if [ $$? -ne 0 ]; then \ + rm -f $@; \ + /bin/false; \ + fi; + $(verify_kallsyms) +endef + + +ifdef CONFIG_KALLSYMS +# Generate section listing all symbols and add it into vmlinux $(kallsyms.o) +# It's a three stage process: +# o .tmp_vmlinux1 has all symbols and sections, but __kallsyms is +# empty +# Running kallsyms on that gives us .tmp_kallsyms1.o with +# the right size - vmlinux version (uname -v) is updated during this step +# o .tmp_vmlinux2 now has a __kallsyms section of the right size, +# but due to the added section, some addresses have shifted. +# From here, we generate a correct .tmp_kallsyms2.o +# o The correct .tmp_kallsyms2.o is linked into the final vmlinux. +# o Verify that the System.map from vmlinux matches the map from +# .tmp_vmlinux2, just in case we did not generate kallsyms correctly. +# o If CONFIG_KALLSYMS_EXTRA_PASS is set, do an extra pass using +# .tmp_vmlinux3 and .tmp_kallsyms3.o. This is only meant as a +# temporary bypass to allow the kernel to be built while the +# maintainers work out what went wrong with kallsyms. 
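The staged kallsyms link described above can be retraced by hand in a built tree; a sketch, where the file names are the intermediates listed above and scripts/kallsyms takes --all-symbols only when CONFIG_KALLSYMS_ALL is set:

# nm -n .tmp_vmlinux1 | scripts/kallsyms > .tmp_kallsyms1.S	/* stage 1 symbol table */
# cmp -s System.map .tmp_System.map && echo consistent		/* the verify_kallsyms check */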
+ +ifdef CONFIG_KALLSYMS_EXTRA_PASS +last_kallsyms := 3 +else +last_kallsyms := 2 +endif + +kallsyms.o := .tmp_kallsyms$(last_kallsyms).o + +define verify_kallsyms + $(Q)$(if $($(quiet)cmd_sysmap), \ + echo ' $($(quiet)cmd_sysmap) .tmp_System.map' &&) \ + $(cmd_sysmap) .tmp_vmlinux$(last_kallsyms) .tmp_System.map + $(Q)cmp -s System.map .tmp_System.map || \ + (echo Inconsistent kallsyms data; \ + echo Try setting CONFIG_KALLSYMS_EXTRA_PASS; \ + rm .tmp_kallsyms* ; /bin/false ) +endef + +# Update vmlinux version before link +# Use + in front of this rule to silent warning about make -j1 +# First command is ':' to allow us to use + in front of this rule +cmd_ksym_ld = $(cmd_vmlinux__) +define rule_ksym_ld + : + +$(call cmd,vmlinux_version) + $(call cmd,vmlinux__) + $(Q)echo 'cmd_$@ := $(cmd_vmlinux__)' > $(@D)/.$(@F).cmd +endef + +# Generate .S file with all kernel symbols +quiet_cmd_kallsyms = KSYM $@ + cmd_kallsyms = $(NM) -n $< | $(KALLSYMS) \ + $(if $(CONFIG_KALLSYMS_ALL),--all-symbols) > $@ + +.tmp_kallsyms1.o .tmp_kallsyms2.o .tmp_kallsyms3.o: %.o: %.S scripts FORCE + $(call if_changed_dep,as_o_S) + +.tmp_kallsyms%.S: .tmp_vmlinux% $(KALLSYMS) + $(call cmd,kallsyms) + +# .tmp_vmlinux1 must be complete except kallsyms, so update vmlinux version +.tmp_vmlinux1: $(vmlinux-lds) $(vmlinux-all) FORCE + $(call if_changed_rule,ksym_ld) + +.tmp_vmlinux2: $(vmlinux-lds) $(vmlinux-all) .tmp_kallsyms1.o FORCE + $(call if_changed,vmlinux__) + +.tmp_vmlinux3: $(vmlinux-lds) $(vmlinux-all) .tmp_kallsyms2.o FORCE + $(call if_changed,vmlinux__) + +# Needs to visit scripts/ before $(KALLSYMS) can be used. +$(KALLSYMS): scripts ; + +# Generate some data for debugging strange kallsyms problems +debug_kallsyms: .tmp_map$(last_kallsyms) + +.tmp_map%: .tmp_vmlinux% FORCE + ($(OBJDUMP) -h $< | $(AWK) '/^ +[0-9]/{print $$4 " 0 " $$2}'; $(NM) $<) | sort > $@ + +.tmp_map3: .tmp_map2 + +.tmp_map2: .tmp_map1 + +endif # ifdef CONFIG_KALLSYMS + +# Do modpost on a prelinked vmlinux. The finally linked vmlinux has +# relevant sections renamed as per the linker script. +quiet_cmd_vmlinux-modpost = LD $@ + cmd_vmlinux-modpost = $(LD) $(LDFLAGS) -r -o $@ \ + $(vmlinux-init) --start-group $(vmlinux-main) --end-group \ + $(filter-out $(vmlinux-init) $(vmlinux-main) FORCE ,$^) +define rule_vmlinux-modpost + : + +$(call cmd,vmlinux-modpost) + $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost $@ + $(Q)echo 'cmd_$@ := $(cmd_vmlinux-modpost)' > $(dot-target).cmd +endef + +# vmlinux image - including updated kernel symbols +vmlinux: $(vmlinux-lds) $(vmlinux-init) $(vmlinux-main) vmlinux.o $(kallsyms.o) FORCE +ifdef CONFIG_HEADERS_CHECK + $(Q)$(MAKE) -f $(srctree)/Makefile headers_check +endif +ifdef CONFIG_SAMPLES + $(Q)$(MAKE) $(build)=samples +endif +ifdef CONFIG_BUILD_DOCSRC + $(Q)$(MAKE) $(build)=Documentation +endif + $(call vmlinux-modpost) + $(call if_changed_rule,vmlinux__) + $(Q)rm -f .old_version + +# build vmlinux.o first to catch section mismatch errors early +ifdef CONFIG_KALLSYMS +.tmp_vmlinux1: vmlinux.o +endif + +modpost-init := $(filter-out init/built-in.o, $(vmlinux-init)) +vmlinux.o: $(modpost-init) $(vmlinux-main) FORCE + $(call if_changed_rule,vmlinux-modpost) + +# The actual objects are generated when descending, +# make sure no implicit rule kicks in +$(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): $(vmlinux-dirs) ; + +# Handle descending into subdirectories listed in $(vmlinux-dirs) +# Preset locale variables to speed up the build process. 
Limit locale
+# tweaks to this spot to avoid wrong language settings when running
+# make menuconfig etc.
+# Error messages still appear in the original language
+
+PHONY += $(vmlinux-dirs)
+$(vmlinux-dirs): prepare scripts
+	$(Q)$(MAKE) $(build)=$@
+
+# Build the kernel release string
+#
+# The KERNELRELEASE value built here is stored in the file
+# include/config/kernel.release, and is used when executing several
+# make targets, such as "make install" or "make modules_install."
+#
+# The eventual kernel release string consists of the following fields,
+# shown in a hierarchical format to show how smaller parts are concatenated
+# to form the larger and final value, with values coming from places like
+# the Makefile, kernel config options, make command line options and/or
+# SCM tag information.
+#
+#	$(KERNELVERSION)
+#	  $(VERSION)			eg, 2
+#	  $(PATCHLEVEL)			eg, 6
+#	  $(SUBLEVEL)			eg, 18
+#	  $(EXTRAVERSION)		eg, -rc6
+#	$(localver-full)
+#	  $(localver)
+#	    localversion*		(files without backups, containing '~')
+#	    $(CONFIG_LOCALVERSION)	(from kernel config setting)
+#	  $(localver-auto)		(only if CONFIG_LOCALVERSION_AUTO is set)
+#	    ./scripts/setlocalversion	(SCM tag, if one exists)
+#	    $(LOCALVERSION)		(from make command line if provided)
+#
+# Note how the final $(localver-auto) string is included *only* if the
+# kernel config option CONFIG_LOCALVERSION_AUTO is selected. Also, at the
+# moment, only git is supported but other SCMs can edit the script
+# scripts/setlocalversion and add the appropriate checks as needed.
+
+pattern = ".*/localversion[^~]*"
+string = $(shell cat /dev/null \
+	`find $(objtree) $(srctree) -maxdepth 1 -regex $(pattern) | sort -u`)
+
+localver = $(subst $(space),, $(string) \
+	$(patsubst "%",%,$(CONFIG_LOCALVERSION)))
+
+# If CONFIG_LOCALVERSION_AUTO is set scripts/setlocalversion is called
+# and if the SCM is known, a tag from the SCM is appended.
+# The appended tag is determined by the SCM used.
+#
+# .scmversion is used when generating rpm packages so we do not lose
+# the version information from the SCM when we do the build of the kernel
+# from the copied source
+ifdef CONFIG_LOCALVERSION_AUTO
+
+ifeq ($(wildcard .scmversion),)
+	_localver-auto = $(shell $(CONFIG_SHELL) \
+		$(srctree)/scripts/setlocalversion $(srctree))
+else
+	_localver-auto = $(shell cat .scmversion 2> /dev/null)
+endif
+
+	localver-auto = $(LOCALVERSION)$(_localver-auto)
+endif
+
+localver-full = $(localver)$(localver-auto)
+
+# Store (new) KERNELRELEASE string in include/config/kernel.release
+kernelrelease = $(KERNELVERSION)$(localver-full)
+include/config/kernel.release: include/config/auto.conf FORCE
+	$(Q)rm -f $@
+	$(Q)echo $(kernelrelease) > $@
+
+
+# Things we need to do before we recursively start building the kernel
+# or the modules are listed in "prepare".
+# A multi level approach is used. prepareN is processed before prepareN-1.
+# archprepare is used in arch Makefiles; when it is processed, the asm
+# symlink, version.h and scripts_basic are processed / created.
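To watch the release string being assembled from the fields above, a hedged example in a configured tree where 'make prepare' has already run, assuming CONFIG_LOCALVERSION and LOCALVERSION are empty and CONFIG_LOCALVERSION_AUTO is unset (expected output shown):

# echo '-test' > localversion-test
# make kernelrelease
2.6.32.54-test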
+
+# Listed in dependency order
+PHONY += prepare archprepare prepare0 prepare1 prepare2 prepare3
+
+# prepare3 is used to check if we are building in a separate output directory,
+# and if so do:
+# 1) Check that make has not been executed in the kernel src $(srctree)
+# 2) Create the include2 directory, used for the second asm symlink
+prepare3: include/config/kernel.release
+ifneq ($(KBUILD_SRC),)
+	@$(kecho) '  Using $(srctree) as source for kernel'
+	$(Q)if [ -f $(srctree)/.config -o -d $(srctree)/include/config ]; then \
+		echo "  $(srctree) is not clean, please run 'make mrproper'";\
+		echo "  in the '$(srctree)' directory.";\
+		/bin/false; \
+	fi;
+	$(Q)if [ ! -d include2 ]; then \
+		mkdir -p include2; \
+		ln -fsn $(srctree)/include/asm-$(SRCARCH) include2/asm; \
+	fi
+endif
+
+# prepare2 creates a makefile if using a separate output directory
+prepare2: prepare3 outputmakefile
+
+prepare1: prepare2 include/linux/version.h include/linux/utsrelease.h \
+	include/asm include/config/auto.conf
+	$(cmd_crmodverdir)
+
+archprepare: prepare1 scripts_basic
+
+prepare0: archprepare FORCE
+	$(Q)$(MAKE) $(build)=.
+	$(Q)$(MAKE) $(build)=. missing-syscalls
+
+# All the preparing..
+prepare: prepare0
+
+# The asm symlink changes when $(ARCH) changes.
+# Detect this and ask user to run make mrproper
+# If asm is a stale symlink (points to a dir that does not exist) remove it
+define check-symlink
+	set -e; \
+	if [ -L include/asm ]; then \
+		asmlink=`readlink include/asm | cut -d '-' -f 2`; \
+		if [ "$$asmlink" != "$(SRCARCH)" ]; then \
+			echo "ERROR: the symlink $@ points to asm-$$asmlink but asm-$(SRCARCH) was expected"; \
+			echo "       set ARCH or save .config and run 'make mrproper' to fix it"; \
+			exit 1; \
+		fi; \
+		test -e $$asmlink || rm include/asm; \
+	elif [ -d include/asm ]; then \
+		echo "ERROR: $@ is a directory but a symlink was expected";\
+		exit 1; \
+	fi
+endef
+
+# We create the target directory of the symlink if it does
+# not exist so the test in check-symlink works and we have a
+# directory for generated files as used by some architectures.
+define create-symlink
+	if [ ! -L include/asm ]; then \
+		$(kecho) '  SYMLINK $@ -> include/asm-$(SRCARCH)'; \
+		if [ !
-d include/asm-$(SRCARCH) ]; then \ + mkdir -p include/asm-$(SRCARCH); \ + fi; \ + ln -fsn asm-$(SRCARCH) $@; \ + fi +endef + +include/asm: FORCE + $(Q)$(check-symlink) + $(Q)$(create-symlink) + +# Generate some files +# --------------------------------------------------------------------------- + +# KERNELRELEASE can change from a few different places, meaning version.h +# needs to be updated, so this check is forced on all builds + +uts_len := 64 +define filechk_utsrelease.h + if [ `echo -n "$(KERNELRELEASE)" | wc -c ` -gt $(uts_len) ]; then \ + echo '"$(KERNELRELEASE)" exceeds $(uts_len) characters' >&2; \ + exit 1; \ + fi; \ + (echo \#define UTS_RELEASE \"$(KERNELRELEASE)\";) +endef + +define filechk_version.h + (echo \#define LINUX_VERSION_CODE $(shell \ + expr $(VERSION) \* 65536 + $(PATCHLEVEL) \* 256 + $(SUBLEVEL)); \ + echo '#define KERNEL_VERSION(a,b,c) (((a) << 16) + ((b) << 8) + (c))';) +endef + +include/linux/version.h: $(srctree)/Makefile FORCE + $(call filechk,version.h) + +include/linux/utsrelease.h: include/config/kernel.release FORCE + $(call filechk,utsrelease.h) + +PHONY += headerdep +headerdep: + $(Q)find include/ -name '*.h' | xargs --max-args 1 scripts/headerdep.pl + +# --------------------------------------------------------------------------- + +PHONY += depend dep +depend dep: + @echo '*** Warning: make $@ is unnecessary now.' + +# --------------------------------------------------------------------------- +# Firmware install +INSTALL_FW_PATH=$(INSTALL_MOD_PATH)/lib/firmware +export INSTALL_FW_PATH + +PHONY += firmware_install +firmware_install: FORCE + @mkdir -p $(objtree)/firmware + $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.fwinst obj=firmware __fw_install + +# --------------------------------------------------------------------------- +# Kernel headers + +#Default location for installed headers +export INSTALL_HDR_PATH = $(objtree)/usr + +hdr-inst := -rR -f $(srctree)/scripts/Makefile.headersinst obj +# Find out where the Kbuild file is located to support +# arch/$(ARCH)/include/asm +hdr-dir = $(strip \ + $(if $(wildcard $(srctree)/arch/$(hdr-arch)/include/asm/Kbuild), \ + arch/$(hdr-arch)/include/asm, include/asm-$(hdr-arch))) + +# If we do an all arch process set dst to asm-$(hdr-arch) +hdr-dst = $(if $(KBUILD_HEADERS), dst=include/asm-$(hdr-arch), dst=include/asm) + +PHONY += __headers +__headers: include/linux/version.h scripts_basic FORCE + $(Q)$(MAKE) $(build)=scripts scripts/unifdef + +PHONY += headers_install_all +headers_install_all: + $(Q)$(CONFIG_SHELL) $(srctree)/scripts/headers.sh install + +PHONY += headers_install +headers_install: __headers + $(if $(wildcard $(srctree)/$(hdr-dir)/Kbuild),, \ + $(error Headers not exportable for the $(SRCARCH) architecture)) + $(Q)$(MAKE) $(hdr-inst)=include + $(Q)$(MAKE) $(hdr-inst)=$(hdr-dir) $(hdr-dst) + +PHONY += headers_check_all +headers_check_all: headers_install_all + $(Q)$(CONFIG_SHELL) $(srctree)/scripts/headers.sh check + +PHONY += headers_check +headers_check: headers_install + $(Q)$(MAKE) $(hdr-inst)=include HDRCHECK=1 + $(Q)$(MAKE) $(hdr-inst)=$(hdr-dir) $(hdr-dst) HDRCHECK=1 + +# --------------------------------------------------------------------------- +# Modules + +ifdef CONFIG_MODULES + +# By default, build modules as well + +all: modules + +# Build modules +# +# A module can be listed more than once in obj-m resulting in +# duplicate lines in modules.order files. Those are removed +# using awk while concatenating to the final file. 
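The awk expression in the stage-2 modules rule below keeps only the first occurrence of every input line; the idiom can be demonstrated standalone:

# printf 'a.ko\nb.ko\na.ko\n' | awk '!x[$0]++'
a.ko
b.ko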
+ +PHONY += modules +modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux) + $(Q)$(AWK) '!x[$$0]++' $(vmlinux-dirs:%=$(objtree)/%/modules.order) > $(objtree)/modules.order + @$(kecho) ' Building modules, stage 2.'; + $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost + $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.fwinst obj=firmware __fw_modbuild + + +# Target to prepare building external modules +PHONY += modules_prepare +modules_prepare: prepare scripts + +# Target to install modules +PHONY += modules_install +modules_install: _modinst_ _modinst_post + +PHONY += _modinst_ +_modinst_: + @if [ -z "`$(DEPMOD) -V 2>/dev/null | grep module-init-tools`" ]; then \ + echo "Warning: you may need to install module-init-tools"; \ + echo "See http://www.codemonkey.org.uk/docs/post-halloween-2.6.txt";\ + sleep 1; \ + fi + @rm -rf $(MODLIB)/kernel + @rm -f $(MODLIB)/source + @mkdir -p $(MODLIB)/kernel + @ln -s $(srctree) $(MODLIB)/source + @if [ ! $(objtree) -ef $(MODLIB)/build ]; then \ + rm -f $(MODLIB)/build ; \ + ln -s $(objtree) $(MODLIB)/build ; \ + fi + @cp -f $(objtree)/modules.order $(MODLIB)/ + $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modinst + +# This depmod is only for convenience to give the initial +# boot a modules.dep even before / is mounted read-write. However the +# boot script depmod is the master version. +PHONY += _modinst_post +_modinst_post: _modinst_ + $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.fwinst obj=firmware __fw_modinst + $(call cmd,depmod) + +else # CONFIG_MODULES + +# Modules not configured +# --------------------------------------------------------------------------- + +modules modules_install: FORCE + @echo + @echo "The present kernel configuration has modules disabled." + @echo "Type 'make config' and enable loadable module support." + @echo "Then build a kernel with module support enabled." + @echo + @exit 1 + +endif # CONFIG_MODULES + +### +# Cleaning is done on three levels. +# make clean Delete most generated files +# Leave enough to build external modules +# make mrproper Delete the current configuration, and all generated files +# make distclean Remove editor backup files, patch leftover files and the like + +# Directories & files removed with 'make clean' +CLEAN_DIRS += $(MODVERDIR) +CLEAN_FILES += vmlinux System.map \ + .tmp_kallsyms* .tmp_version .tmp_vmlinux* .tmp_System.map + +# Directories & files removed with 'make mrproper' +MRPROPER_DIRS += include/config include2 usr/include include/generated +MRPROPER_FILES += .config .config.old include/asm .version .old_version \ + include/linux/autoconf.h include/linux/version.h \ + include/linux/utsrelease.h \ + include/linux/bounds.h include/asm*/asm-offsets.h \ + Module.symvers Module.markers tags TAGS cscope* + +# clean - Delete most, but leave enough to build external modules +# +clean: rm-dirs := $(CLEAN_DIRS) +clean: rm-files := $(CLEAN_FILES) +clean-dirs := $(addprefix _clean_,$(srctree) $(vmlinux-alldirs) Documentation) + +PHONY += $(clean-dirs) clean archclean +$(clean-dirs): + $(Q)$(MAKE) $(clean)=$(patsubst _clean_%,%,$@) + +clean: archclean $(clean-dirs) + $(call cmd,rmdirs) + $(call cmd,rmfiles) + @find . 
$(RCS_FIND_IGNORE) \ + \( -name '*.[oas]' -o -name '*.ko' -o -name '.*.cmd' \ + -o -name '.*.d' -o -name '.*.tmp' -o -name '*.mod.c' \ + -o -name '*.symtypes' -o -name 'modules.order' \ + -o -name 'Module.markers' -o -name '.tmp_*.o.*' \ + -o -name '*.gcno' \) -type f -print | xargs rm -f + +# mrproper - Delete all generated files, including .config +# +mrproper: rm-dirs := $(wildcard $(MRPROPER_DIRS)) +mrproper: rm-files := $(wildcard $(MRPROPER_FILES)) +mrproper-dirs := $(addprefix _mrproper_,Documentation/DocBook scripts) + +PHONY += $(mrproper-dirs) mrproper archmrproper +$(mrproper-dirs): + $(Q)$(MAKE) $(clean)=$(patsubst _mrproper_%,%,$@) + +mrproper: clean archmrproper $(mrproper-dirs) + $(call cmd,rmdirs) + $(call cmd,rmfiles) + +# distclean +# +PHONY += distclean + +distclean: mrproper + @find $(srctree) $(RCS_FIND_IGNORE) \ + \( -name '*.orig' -o -name '*.rej' -o -name '*~' \ + -o -name '*.bak' -o -name '#*#' -o -name '.*.orig' \ + -o -name '.*.rej' -o -size 0 \ + -o -name '*%' -o -name '.*.cmd' -o -name 'core' \) \ + -type f -print | xargs rm -f + + +# Packaging of the kernel to various formats +# --------------------------------------------------------------------------- +# rpm target kept for backward compatibility +package-dir := $(srctree)/scripts/package + +%pkg: include/config/kernel.release FORCE + $(Q)$(MAKE) $(build)=$(package-dir) $@ +rpm: include/config/kernel.release FORCE + $(Q)$(MAKE) $(build)=$(package-dir) $@ + + +# Brief documentation of the typical targets used +# --------------------------------------------------------------------------- + +boards := $(wildcard $(srctree)/arch/$(SRCARCH)/configs/*_defconfig) +boards := $(notdir $(boards)) +board-dirs := $(dir $(wildcard $(srctree)/arch/$(SRCARCH)/configs/*/*_defconfig)) +board-dirs := $(sort $(notdir $(board-dirs:/=))) + +help: + @echo 'Cleaning targets:' + @echo ' clean - Remove most generated files but keep the config and' + @echo ' enough build support to build external modules' + @echo ' mrproper - Remove all generated files + config + various backup files' + @echo ' distclean - mrproper + remove editor backup and patch files' + @echo '' + @echo 'Configuration targets:' + @$(MAKE) -f $(srctree)/scripts/kconfig/Makefile help + @echo '' + @echo 'Other generic targets:' + @echo ' all - Build all targets marked with [*]' + @echo '* vmlinux - Build the bare kernel' + @echo '* modules - Build all modules' + @echo ' modules_install - Install all modules to INSTALL_MOD_PATH (default: /)' + @echo ' firmware_install- Install all firmware to INSTALL_FW_PATH' + @echo ' (default: $$(INSTALL_MOD_PATH)/lib/firmware)' + @echo ' dir/ - Build all files in dir and below' + @echo ' dir/file.[ois] - Build specified target only' + @echo ' dir/file.ko - Build module including final link' + @echo ' modules_prepare - Set up for building external modules' + @echo ' tags/TAGS - Generate tags file for editors' + @echo ' cscope - Generate cscope index' + @echo ' kernelrelease - Output the release version string' + @echo ' kernelversion - Output the version stored in Makefile' + @echo ' headers_install - Install sanitised kernel headers to INSTALL_HDR_PATH'; \ + echo ' (default: $(INSTALL_HDR_PATH))'; \ + echo '' + @echo 'Static analysers' + @echo ' checkstack - Generate a list of stack hogs' + @echo ' namespacecheck - Name space analysis on compiled kernel' + @echo ' versioncheck - Sanity check on version.h usage' + @echo ' includecheck - Check for duplicate included header files' + @echo ' export_report - List the usages of all 
exported symbols' + @echo ' headers_check - Sanity check on exported headers' + @echo ' headerdep - Detect inclusion cycles in headers'; \ + echo '' + @echo 'Kernel packaging:' + @$(MAKE) $(build)=$(package-dir) help + @echo '' + @echo 'Documentation targets:' + @$(MAKE) -f $(srctree)/Documentation/DocBook/Makefile dochelp + @echo '' + @echo 'Architecture specific targets ($(SRCARCH)):' + @$(if $(archhelp),$(archhelp),\ + echo ' No architecture specific help defined for $(SRCARCH)') + @echo '' + @$(if $(boards), \ + $(foreach b, $(boards), \ + printf " %-24s - Build for %s\\n" $(b) $(subst _defconfig,,$(b));) \ + echo '') + @$(if $(board-dirs), \ + $(foreach b, $(board-dirs), \ + printf " %-16s - Show %s-specific targets\\n" help-$(b) $(b);) \ + printf " %-16s - Show all of the above\\n" help-boards; \ + echo '') + + @echo ' make V=0|1 [targets] 0 => quiet build (default), 1 => verbose build' + @echo ' make V=2 [targets] 2 => give reason for rebuild of target' + @echo ' make O=dir [targets] Locate all output files in "dir", including .config' + @echo ' make C=1 [targets] Check all c source with $$CHECK (sparse by default)' + @echo ' make C=2 [targets] Force check of all c source with $$CHECK' + @echo '' + @echo 'Execute "make" or "make all" to build all targets marked with [*] ' + @echo 'For further info see the ./README file' + + +help-board-dirs := $(addprefix help-,$(board-dirs)) + +help-boards: $(help-board-dirs) + +boards-per-dir = $(notdir $(wildcard $(srctree)/arch/$(SRCARCH)/configs/$*/*_defconfig)) + +$(help-board-dirs): help-%: + @echo 'Architecture specific targets ($(SRCARCH) $*):' + @$(if $(boards-per-dir), \ + $(foreach b, $(boards-per-dir), \ + printf " %-24s - Build for %s\\n" $*/$(b) $(subst _defconfig,,$(b));) \ + echo '') + + +# Documentation targets +# --------------------------------------------------------------------------- +%docs: scripts_basic FORCE + $(Q)$(MAKE) $(build)=Documentation/DocBook $@ + +else # KBUILD_EXTMOD + +### +# External module support. +# When building external modules the kernel used as basis is considered +# read-only, and no consistency checks are made and the make +# system is not used on the basis kernel. If updates are required +# in the basis kernel ordinary make commands (without M=...) must +# be used. +# +# The following are the only valid targets when building external +# modules. 
+# make M=dir clean Delete all automatically generated files +# make M=dir modules Make all modules in specified dir +# make M=dir Same as 'make M=dir modules' +# make M=dir modules_install +# Install the modules built in the module directory +# Assumes install directory is already created + +# We are always building modules +KBUILD_MODULES := 1 +PHONY += crmodverdir +crmodverdir: + $(cmd_crmodverdir) + +PHONY += $(objtree)/Module.symvers +$(objtree)/Module.symvers: + @test -e $(objtree)/Module.symvers || ( \ + echo; \ + echo " WARNING: Symbol version dump $(objtree)/Module.symvers"; \ + echo " is missing; modules will have no dependencies and modversions."; \ + echo ) + +module-dirs := $(addprefix _module_,$(KBUILD_EXTMOD)) +PHONY += $(module-dirs) modules +$(module-dirs): crmodverdir $(objtree)/Module.symvers + $(Q)$(MAKE) $(build)=$(patsubst _module_%,%,$@) + +modules: $(module-dirs) + @$(kecho) ' Building modules, stage 2.'; + $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost + +PHONY += modules_install +modules_install: _emodinst_ _emodinst_post + +install-dir := $(if $(INSTALL_MOD_DIR),$(INSTALL_MOD_DIR),extra) +PHONY += _emodinst_ +_emodinst_: + $(Q)mkdir -p $(MODLIB)/$(install-dir) + $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modinst + +PHONY += _emodinst_post +_emodinst_post: _emodinst_ + $(call cmd,depmod) + +clean-dirs := $(addprefix _clean_,$(KBUILD_EXTMOD)) + +PHONY += $(clean-dirs) clean +$(clean-dirs): + $(Q)$(MAKE) $(clean)=$(patsubst _clean_%,%,$@) + +clean: rm-dirs := $(MODVERDIR) +clean: rm-files := $(KBUILD_EXTMOD)/Module.symvers \ + $(KBUILD_EXTMOD)/Module.markers \ + $(KBUILD_EXTMOD)/modules.order +clean: $(clean-dirs) + $(call cmd,rmdirs) + $(call cmd,rmfiles) + @find $(KBUILD_EXTMOD) $(RCS_FIND_IGNORE) \ + \( -name '*.[oas]' -o -name '*.ko' -o -name '.*.cmd' \ + -o -name '.*.d' -o -name '.*.tmp' -o -name '*.mod.c' \ + -o -name '*.gcno' \) -type f -print | xargs rm -f + +help: + @echo ' Building external modules.' + @echo ' Syntax: make -C path/to/kernel/src M=$$PWD target' + @echo '' + @echo ' modules - default target, build the module(s)' + @echo ' modules_install - install the module' + @echo ' clean - remove generated files in module directory only' + @echo '' + +# Dummies... +PHONY += prepare scripts +prepare: ; +scripts: ; +endif # KBUILD_EXTMOD + +# Generate tags for editors +# --------------------------------------------------------------------------- +quiet_cmd_tags = GEN $@ + cmd_tags = $(CONFIG_SHELL) $(srctree)/scripts/tags.sh $@ + +tags TAGS cscope: FORCE + $(call cmd,tags) + +# Scripts to check various things for consistency +# --------------------------------------------------------------------------- + +includecheck: + find * $(RCS_FIND_IGNORE) \ + -name '*.[hcS]' -type f -print | sort \ + | xargs $(PERL) -w $(srctree)/scripts/checkincludes.pl + +versioncheck: + find * $(RCS_FIND_IGNORE) \ + -name '*.[hcS]' -type f -print | sort \ + | xargs $(PERL) -w $(srctree)/scripts/checkversion.pl + +namespacecheck: + $(PERL) $(srctree)/scripts/namespace.pl + +export_report: + $(PERL) $(srctree)/scripts/export_report.pl + +endif #ifeq ($(config-targets),1) +endif #ifeq ($(mixed-targets),1) + +PHONY += checkstack kernelrelease kernelversion + +# UML needs a little special treatment here. It wants to use the host +# toolchain, so needs $(SUBARCH) passed to checkstack.pl. Everyone +# else wants $(ARCH), including people doing cross-builds, which means +# that $(SUBARCH) doesn't work here. 
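A typical invocation of the external-module targets listed above, run from the module's own directory; the kernel source path is illustrative:

# make -C /usr/src/linux-2.6.32 M=$PWD modules
# make -C /usr/src/linux-2.6.32 M=$PWD modules_install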
+ifeq ($(ARCH), um) +CHECKSTACK_ARCH := $(SUBARCH) +else +CHECKSTACK_ARCH := $(ARCH) +endif +checkstack: + $(OBJDUMP) -d vmlinux $$(find . -name '*.ko') | \ + $(PERL) $(src)/scripts/checkstack.pl $(CHECKSTACK_ARCH) + +kernelrelease: + $(if $(wildcard include/config/kernel.release), $(Q)echo $(KERNELRELEASE), \ + $(error kernelrelease not valid - run 'make prepare' to update it)) +kernelversion: + @echo $(KERNELVERSION) + +# Single targets +# --------------------------------------------------------------------------- +# Single targets are compatible with: +# - build with mixed source and output +# - build with separate output dir 'make O=...' +# - external modules +# +# target-dir => where to store outputfile +# build-dir => directory in kernel source tree to use + +ifeq ($(KBUILD_EXTMOD),) + build-dir = $(patsubst %/,%,$(dir $@)) + target-dir = $(dir $@) +else + zap-slash=$(filter-out .,$(patsubst %/,%,$(dir $@))) + build-dir = $(KBUILD_EXTMOD)$(if $(zap-slash),/$(zap-slash)) + target-dir = $(if $(KBUILD_EXTMOD),$(dir $<),$(dir $@)) +endif + +%.s: %.c prepare scripts FORCE + $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@) +%.i: %.c prepare scripts FORCE + $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@) +%.o: %.c prepare scripts FORCE + $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@) +%.lst: %.c prepare scripts FORCE + $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@) +%.s: %.S prepare scripts FORCE + $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@) +%.o: %.S prepare scripts FORCE + $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@) +%.symtypes: %.c prepare scripts FORCE + $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@) + +# Modules +/: prepare scripts FORCE + $(cmd_crmodverdir) + $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \ + $(build)=$(build-dir) +%/: prepare scripts FORCE + $(cmd_crmodverdir) + $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \ + $(build)=$(build-dir) +%.ko: prepare scripts FORCE + $(cmd_crmodverdir) + $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \ + $(build)=$(build-dir) $(@:.ko=.o) + $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost + +# FIXME Should go into a make.lib or something +# =========================================================================== + +quiet_cmd_rmdirs = $(if $(wildcard $(rm-dirs)),CLEAN $(wildcard $(rm-dirs))) + cmd_rmdirs = rm -rf $(rm-dirs) + +quiet_cmd_rmfiles = $(if $(wildcard $(rm-files)),CLEAN $(wildcard $(rm-files))) + cmd_rmfiles = rm -f $(rm-files) + +# Run depmod only if we have System.map and depmod is executable +quiet_cmd_depmod = DEPMOD $(KERNELRELEASE) + cmd_depmod = \ + if [ -r System.map -a -x $(DEPMOD) ]; then \ + $(DEPMOD) -ae -F System.map \ + $(if $(strip $(INSTALL_MOD_PATH)), -b $(INSTALL_MOD_PATH) ) \ + $(KERNELRELEASE); \ + fi + +# Create temporary dir for module support files +# clean it up only when building all modules +cmd_crmodverdir = $(Q)mkdir -p $(MODVERDIR) \ + $(if $(KBUILD_MODULES),; rm -f $(MODVERDIR)/*) + +a_flags = -Wp,-MD,$(depfile) $(KBUILD_AFLAGS) $(AFLAGS_KERNEL) \ + $(NOSTDINC_FLAGS) $(LINUXINCLUDE) $(KBUILD_CPPFLAGS) \ + $(modkern_aflags) $(EXTRA_AFLAGS) $(AFLAGS_$(basetarget).o) + +quiet_cmd_as_o_S = AS $@ +cmd_as_o_S = $(CC) $(a_flags) -c -o $@ $< + +# read all saved command lines + +targets := $(wildcard $(sort $(targets))) +cmd_files := $(wildcard .*.cmd $(foreach f,$(targets),$(dir $(f)).$(notdir $(f)).cmd)) + +ifneq ($(cmd_files),) + $(cmd_files): ; # Do not try to update included dependency 
files + include $(cmd_files) +endif + +# Shorthand for $(Q)$(MAKE) -f scripts/Makefile.clean obj=dir +# Usage: +# $(Q)$(MAKE) $(clean)=dir +clean := -f $(if $(KBUILD_SRC),$(srctree)/)scripts/Makefile.clean obj + +endif # skip-makefile + +PHONY += FORCE +FORCE: + +# Declare the contents of the .PHONY variable as phony. We keep that +# information in a variable so we can use it in if_changed and friends. +.PHONY: $(PHONY) diff -Nur kernel-2.6.32.54/linux-2.6.32/Makefile.rej kernel-2.6.32.54.vs/linux-2.6.32/Makefile.rej --- kernel-2.6.32.54/linux-2.6.32/Makefile.rej 1970-01-01 01:00:00.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/Makefile.rej 2012-01-16 14:51:21.689409499 +0100 @@ -0,0 +1,11 @@ +--- Makefile 2012-01-10 09:12:29.000000000 +0100 ++++ Makefile 2012-01-10 09:31:18.000000000 +0100 +@@ -1,7 +1,7 @@ + VERSION = 2 + PATCHLEVEL = 6 + SUBLEVEL = 32 +-EXTRAVERSION = .53 ++EXTRAVERSION = .53-vs2.3.0.36.29.8 + NAME = Man-Eating Seals of Antiquity + + # *DOCUMENTATION* diff -Nur kernel-2.6.32.54/linux-2.6.32/mm/allocpercpu.c kernel-2.6.32.54.vs/linux-2.6.32/mm/allocpercpu.c --- kernel-2.6.32.54/linux-2.6.32/mm/allocpercpu.c 2009-12-03 04:51:21.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/mm/allocpercpu.c 2012-01-16 14:51:22.061408197 +0100 @@ -160,12 +160,14 @@ void __init setup_per_cpu_areas(void) { - unsigned long size, i; + unsigned long size, vspc, i; char *ptr; unsigned long nr_possible_cpus = num_possible_cpus(); + vspc = PERCPU_PERCTX * CONFIG_VSERVER_CONTEXTS; + /* Copy section for each CPU (we discard the original) */ - size = ALIGN(PERCPU_ENOUGH_ROOM, PAGE_SIZE); + size = ALIGN(PERCPU_ENOUGH_ROOM + vspc, PAGE_SIZE); ptr = alloc_bootmem_pages(size * nr_possible_cpus); for_each_possible_cpu(i) { diff -Nur kernel-2.6.32.54/linux-2.6.32/mm/filemap_xip.c kernel-2.6.32.54.vs/linux-2.6.32/mm/filemap_xip.c --- kernel-2.6.32.54/linux-2.6.32/mm/filemap_xip.c 2009-12-03 04:51:21.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/mm/filemap_xip.c 2012-01-16 14:51:22.061408197 +0100 @@ -17,6 +17,7 @@ #include #include #include +#include #include #include diff -Nur kernel-2.6.32.54/linux-2.6.32/mm/fremap.c kernel-2.6.32.54.vs/linux-2.6.32/mm/fremap.c --- kernel-2.6.32.54/linux-2.6.32/mm/fremap.c 2009-12-03 04:51:21.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/mm/fremap.c 2012-01-16 14:51:22.061408197 +0100 @@ -16,6 +16,7 @@ #include #include #include +#include #include #include diff -Nur kernel-2.6.32.54/linux-2.6.32/mm/hugetlb.c kernel-2.6.32.54.vs/linux-2.6.32/mm/hugetlb.c --- kernel-2.6.32.54/linux-2.6.32/mm/hugetlb.c 2012-01-16 15:01:39.976725207 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/mm/hugetlb.c 2012-01-16 14:51:22.061408197 +0100 @@ -24,6 +24,7 @@ #include #include +#include #include "internal.h" const unsigned long hugetlb_zero = 0, hugetlb_infinity = ~0UL; diff -Nur kernel-2.6.32.54/linux-2.6.32/mm/memcontrol.c kernel-2.6.32.54.vs/linux-2.6.32/mm/memcontrol.c --- kernel-2.6.32.54/linux-2.6.32/mm/memcontrol.c 2012-01-16 15:01:39.980725193 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/mm/memcontrol.c 2012-01-16 14:51:22.065408183 +0100 @@ -549,6 +549,31 @@ struct mem_cgroup, css); } +u64 mem_cgroup_res_read_u64(struct mem_cgroup *mem, int member) +{ + return res_counter_read_u64(&mem->res, member); +} + +u64 mem_cgroup_memsw_read_u64(struct mem_cgroup *mem, int member) +{ + return res_counter_read_u64(&mem->memsw, member); +} + +s64 mem_cgroup_stat_read_cache(struct mem_cgroup *mem) +{ + return mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_CACHE); +} + +s64 
mem_cgroup_stat_read_anon(struct mem_cgroup *mem) +{ + return mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_RSS); +} + +s64 mem_cgroup_stat_read_mapped(struct mem_cgroup *mem) +{ + return mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_MAPPED_FILE); +} + static struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm) { struct mem_cgroup *mem = NULL; diff -Nur kernel-2.6.32.54/linux-2.6.32/mm/memory.c kernel-2.6.32.54.vs/linux-2.6.32/mm/memory.c --- kernel-2.6.32.54/linux-2.6.32/mm/memory.c 2012-01-16 15:01:40.312724018 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/mm/memory.c 2012-01-16 14:51:22.065408183 +0100 @@ -56,6 +56,7 @@ #include #include #include +// #include #include #include @@ -647,6 +648,9 @@ int progress = 0; int rss[2]; + if (!vx_rss_avail(dst_mm, ((end - addr)/PAGE_SIZE + 1))) + return -ENOMEM; + again: rss[1] = rss[0] = 0; dst_pte = pte_alloc_map_lock(dst_mm, dst_pmd, addr, &dst_ptl); @@ -2681,6 +2685,9 @@ pte_unmap(page_table); + if (!vx_rss_avail(mm, 1)) + goto oom; + /* Check if we need to add a guard page to the stack */ if (check_stack_guard_page(vma, address) < 0) return VM_FAULT_SIGBUS; @@ -2987,6 +2994,7 @@ { pte_t entry; spinlock_t *ptl; + int ret = 0, type = VXPT_UNKNOWN; entry = *pte; if (!pte_present(entry)) { @@ -3011,9 +3019,12 @@ if (unlikely(!pte_same(*pte, entry))) goto unlock; if (flags & FAULT_FLAG_WRITE) { - if (!pte_write(entry)) - return do_wp_page(mm, vma, address, + if (!pte_write(entry)) { + ret = do_wp_page(mm, vma, address, pte, pmd, ptl, entry); + type = VXPT_WRITE; + goto out; + } entry = pte_mkdirty(entry); } entry = pte_mkyoung(entry); @@ -3031,7 +3042,10 @@ } unlock: pte_unmap_unlock(pte, ptl); - return 0; + ret = 0; +out: + vx_page_fault(mm, vma, type, ret); + return ret; } /* diff -Nur kernel-2.6.32.54/linux-2.6.32/mm/memory.c.orig kernel-2.6.32.54.vs/linux-2.6.32/mm/memory.c.orig --- kernel-2.6.32.54/linux-2.6.32/mm/memory.c.orig 1970-01-01 01:00:00.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/mm/memory.c.orig 2012-01-16 14:47:19.502254760 +0100 @@ -0,0 +1,3430 @@ +/* + * linux/mm/memory.c + * + * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds + */ + +/* + * demand-loading started 01.12.91 - seems it is high on the list of + * things wanted, and it should be easy to implement. - Linus + */ + +/* + * Ok, demand-loading was easy, shared pages a little bit tricker. Shared + * pages started 02.12.91, seems to work. - Linus. + * + * Tested sharing by executing about 30 /bin/sh: under the old kernel it + * would have taken more than the 6M I have free, but it worked well as + * far as I could see. + * + * Also corrected some "invalidate()"s - I wasn't doing enough of them. + */ + +/* + * Real VM (paging to/from disk) started 18.12.91. Much more work and + * thought has to go into this. Oh, well.. + * 19.12.91 - works, somewhat. Sometimes I get faults, don't know why. + * Found it. Everything seems to work now. + * 20.12.91 - Ok, making the swap-device changeable like the root. + */ + +/* + * 05.04.94 - Multi-page memory management added for v1.1. 
+ * Idea by Alex Bligh (alex@cconcepts.co.uk) + * + * 16.07.99 - Support of BIGMEM added by Gerhard Wichert, Siemens AG + * (Gerhard.Wichert@pdb.siemens.de) + * + * Aug/Sep 2004 Changed to four level page tables (Andi Kleen) + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include "internal.h" + +#ifndef CONFIG_NEED_MULTIPLE_NODES +/* use the per-pgdat data instead for discontigmem - mbligh */ +unsigned long max_mapnr; +struct page *mem_map; + +EXPORT_SYMBOL(max_mapnr); +EXPORT_SYMBOL(mem_map); +#endif + +unsigned long num_physpages; +/* + * A number of key systems in x86 including ioremap() rely on the assumption + * that high_memory defines the upper bound on direct map memory, then end + * of ZONE_NORMAL. Under CONFIG_DISCONTIG this means that max_low_pfn and + * highstart_pfn must be the same; there must be no gap between ZONE_NORMAL + * and ZONE_HIGHMEM. + */ +void * high_memory; + +EXPORT_SYMBOL(num_physpages); +EXPORT_SYMBOL(high_memory); + +/* + * Randomize the address space (stacks, mmaps, brk, etc.). + * + * ( When CONFIG_COMPAT_BRK=y we exclude brk from randomization, + * as ancient (libc5 based) binaries can segfault. ) + */ +int randomize_va_space __read_mostly = +#ifdef CONFIG_COMPAT_BRK + 1; +#else + 2; +#endif + +static int __init disable_randmaps(char *s) +{ + randomize_va_space = 0; + return 1; +} +__setup("norandmaps", disable_randmaps); + +unsigned long zero_pfn __read_mostly; +unsigned long highest_memmap_pfn __read_mostly; + +/* + * CONFIG_MMU architectures set up ZERO_PAGE in their paging_init() + */ +static int __init init_zero_pfn(void) +{ + zero_pfn = page_to_pfn(ZERO_PAGE(0)); + return 0; +} +core_initcall(init_zero_pfn); + +/* + * If a p?d_bad entry is found while walking page tables, report + * the error, before resetting entry to p?d_none. Usually (but + * very seldom) called out from the p?d_none_or_clear_bad macros. + */ + +void pgd_clear_bad(pgd_t *pgd) +{ + pgd_ERROR(*pgd); + pgd_clear(pgd); +} + +void pud_clear_bad(pud_t *pud) +{ + pud_ERROR(*pud); + pud_clear(pud); +} + +void pmd_clear_bad(pmd_t *pmd) +{ + pmd_ERROR(*pmd); + pmd_clear(pmd); +} + +/* + * Note: this doesn't free the actual pages themselves. That + * has been handled earlier when unmapping all the memory regions. 
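
The randomize_va_space setting above is easy to observe from userspace.
A minimal sketch, assuming a Linux build environment: run it a few times
and compare the output; with randomization enabled the stack and heap
addresses differ between runs, while booting with "norandmaps" (or
setting the sysctl kernel.randomize_va_space to 0) pins them.

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	int stack_var;
	void *heap = malloc(16);

	/* The code address only moves as well if built as PIE. */
	printf("stack: %p  heap: %p  code: %p\n",
	       (void *)&stack_var, heap, (void *)main);
	free(heap);
	return 0;
}
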
+ */ +static void free_pte_range(struct mmu_gather *tlb, pmd_t *pmd, + unsigned long addr) +{ + pgtable_t token = pmd_pgtable(*pmd); + pmd_clear(pmd); + pte_free_tlb(tlb, token, addr); + tlb->mm->nr_ptes--; +} + +static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud, + unsigned long addr, unsigned long end, + unsigned long floor, unsigned long ceiling) +{ + pmd_t *pmd; + unsigned long next; + unsigned long start; + + start = addr; + pmd = pmd_offset(pud, addr); + do { + next = pmd_addr_end(addr, end); + if (pmd_none_or_clear_bad(pmd)) + continue; + free_pte_range(tlb, pmd, addr); + } while (pmd++, addr = next, addr != end); + + start &= PUD_MASK; + if (start < floor) + return; + if (ceiling) { + ceiling &= PUD_MASK; + if (!ceiling) + return; + } + if (end - 1 > ceiling - 1) + return; + + pmd = pmd_offset(pud, start); + pud_clear(pud); + pmd_free_tlb(tlb, pmd, start); +} + +static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd, + unsigned long addr, unsigned long end, + unsigned long floor, unsigned long ceiling) +{ + pud_t *pud; + unsigned long next; + unsigned long start; + + start = addr; + pud = pud_offset(pgd, addr); + do { + next = pud_addr_end(addr, end); + if (pud_none_or_clear_bad(pud)) + continue; + free_pmd_range(tlb, pud, addr, next, floor, ceiling); + } while (pud++, addr = next, addr != end); + + start &= PGDIR_MASK; + if (start < floor) + return; + if (ceiling) { + ceiling &= PGDIR_MASK; + if (!ceiling) + return; + } + if (end - 1 > ceiling - 1) + return; + + pud = pud_offset(pgd, start); + pgd_clear(pgd); + pud_free_tlb(tlb, pud, start); +} + +/* + * This function frees user-level page tables of a process. + * + * Must be called with pagetable lock held. + */ +void free_pgd_range(struct mmu_gather *tlb, + unsigned long addr, unsigned long end, + unsigned long floor, unsigned long ceiling) +{ + pgd_t *pgd; + unsigned long next; + unsigned long start; + + /* + * The next few lines have given us lots of grief... + * + * Why are we testing PMD* at this top level? Because often + * there will be no work to do at all, and we'd prefer not to + * go all the way down to the bottom just to discover that. + * + * Why all these "- 1"s? Because 0 represents both the bottom + * of the address space and the top of it (using -1 for the + * top wouldn't help much: the masks would do the wrong thing). + * The rule is that addr 0 and floor 0 refer to the bottom of + * the address space, but end 0 and ceiling 0 refer to the top + * Comparisons need to use "end - 1" and "ceiling - 1" (though + * that end 0 case should be mythical). + * + * Wherever addr is brought up or ceiling brought down, we must + * be careful to reject "the opposite 0" before it confuses the + * subsequent tests. But what about where end is brought down + * by PMD_SIZE below? no, end can't go down to 0 there. + * + * Whereas we round start (addr) and ceiling down, by different + * masks at different levels, in order to test whether a table + * now has no other vmas using it, so can be freed, we don't + * bother to round floor or end up - the tests don't need that. 
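
The repeated "- 1" comparisons described above rely on unsigned
wraparound: subtracting one maps 0, which stands for the top of the
address space in "end" and "ceiling", to the largest unsigned value. A
standalone illustration of why "end - 1 > ceiling - 1" does the right
thing where a naive comparison would not:

#include <stdio.h>

int main(void)
{
	unsigned long end = 0x100000UL;	/* some address */
	unsigned long ceiling = 0;	/* 0 here means "top of address space" */

	/* Naive test: 0 compares as the bottom, which is wrong. */
	printf("end > ceiling        : %d\n", end > ceiling);		/* 1 */

	/* ceiling - 1 wraps to ULONG_MAX, so 0 behaves as the top. */
	printf("end - 1 > ceiling - 1: %d\n", end - 1 > ceiling - 1);	/* 0 */
	printf("ceiling - 1 = %lx\n", ceiling - 1);
	return 0;
}
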
+ */ + + addr &= PMD_MASK; + if (addr < floor) { + addr += PMD_SIZE; + if (!addr) + return; + } + if (ceiling) { + ceiling &= PMD_MASK; + if (!ceiling) + return; + } + if (end - 1 > ceiling - 1) + end -= PMD_SIZE; + if (addr > end - 1) + return; + + start = addr; + pgd = pgd_offset(tlb->mm, addr); + do { + next = pgd_addr_end(addr, end); + if (pgd_none_or_clear_bad(pgd)) + continue; + free_pud_range(tlb, pgd, addr, next, floor, ceiling); + } while (pgd++, addr = next, addr != end); +} + +void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *vma, + unsigned long floor, unsigned long ceiling) +{ + while (vma) { + struct vm_area_struct *next = vma->vm_next; + unsigned long addr = vma->vm_start; + + /* + * Hide vma from rmap and truncate_pagecache before freeing + * pgtables + */ + anon_vma_unlink(vma); + unlink_file_vma(vma); + + if (is_vm_hugetlb_page(vma)) { + hugetlb_free_pgd_range(tlb, addr, vma->vm_end, + floor, next? next->vm_start: ceiling); + } else { + /* + * Optimization: gather nearby vmas into one call down + */ + while (next && next->vm_start <= vma->vm_end + PMD_SIZE + && !is_vm_hugetlb_page(next)) { + vma = next; + next = vma->vm_next; + anon_vma_unlink(vma); + unlink_file_vma(vma); + } + free_pgd_range(tlb, addr, vma->vm_end, + floor, next? next->vm_start: ceiling); + } + vma = next; + } +} + +int __pte_alloc(struct mm_struct *mm, pmd_t *pmd, unsigned long address) +{ + pgtable_t new = pte_alloc_one(mm, address); + if (!new) + return -ENOMEM; + + /* + * Ensure all pte setup (eg. pte page lock and page clearing) are + * visible before the pte is made visible to other CPUs by being + * put into page tables. + * + * The other side of the story is the pointer chasing in the page + * table walking code (when walking the page table without locking; + * ie. most of the time). Fortunately, these data accesses consist + * of a chain of data-dependent loads, meaning most CPUs (alpha + * being the notable exception) will already guarantee loads are + * seen in-order. See the alpha page table accessors for the + * smp_read_barrier_depends() barriers in page table walking code. + */ + smp_wmb(); /* Could be smp_wmb__xxx(before|after)_spin_lock */ + + spin_lock(&mm->page_table_lock); + if (!pmd_present(*pmd)) { /* Has another populated it ? */ + mm->nr_ptes++; + pmd_populate(mm, pmd, new); + new = NULL; + } + spin_unlock(&mm->page_table_lock); + if (new) + pte_free(mm, new); + return 0; +} + +int __pte_alloc_kernel(pmd_t *pmd, unsigned long address) +{ + pte_t *new = pte_alloc_one_kernel(&init_mm, address); + if (!new) + return -ENOMEM; + + smp_wmb(); /* See comment in __pte_alloc */ + + spin_lock(&init_mm.page_table_lock); + if (!pmd_present(*pmd)) { /* Has another populated it ? */ + pmd_populate_kernel(&init_mm, pmd, new); + new = NULL; + } + spin_unlock(&init_mm.page_table_lock); + if (new) + pte_free_kernel(&init_mm, new); + return 0; +} + +static inline void add_mm_rss(struct mm_struct *mm, int file_rss, int anon_rss) +{ + if (file_rss) + add_mm_counter(mm, file_rss, file_rss); + if (anon_rss) + add_mm_counter(mm, anon_rss, anon_rss); +} + +/* + * This function is called to print an error when a bad pte + * is found. For example, we might have a PFN-mapped pte in + * a region that doesn't allow it. + * + * The calling function must still handle the error. 
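
print_bad_pte(), defined next, throttles its own output: a burst of up
to 60 reports, then silence for the rest of the minute while counting
what was suppressed. The same pattern in plain userspace C, with time()
standing in for jiffies; this is illustrative only, not kernel API:

#include <stdio.h>
#include <time.h>

static void report_bad(const char *what)
{
	static time_t resume;
	static unsigned long nr_shown, nr_unshown;
	time_t now = time(NULL);

	if (nr_shown == 60) {
		if (now < resume) {	/* cf. time_before(jiffies, resume) */
			nr_unshown++;
			return;
		}
		if (nr_unshown) {
			printf("BUG: %lu messages suppressed\n", nr_unshown);
			nr_unshown = 0;
		}
		nr_shown = 0;
	}
	if (nr_shown++ == 0)
		resume = now + 60;	/* cf. jiffies + 60 * HZ */

	printf("BUG: %s\n", what);
}

int main(void)
{
	for (int i = 0; i < 100; i++)
		report_bad("bad page map");
	return 0;
}
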
+ */ +static void print_bad_pte(struct vm_area_struct *vma, unsigned long addr, + pte_t pte, struct page *page) +{ + pgd_t *pgd = pgd_offset(vma->vm_mm, addr); + pud_t *pud = pud_offset(pgd, addr); + pmd_t *pmd = pmd_offset(pud, addr); + struct address_space *mapping; + pgoff_t index; + static unsigned long resume; + static unsigned long nr_shown; + static unsigned long nr_unshown; + + /* + * Allow a burst of 60 reports, then keep quiet for that minute; + * or allow a steady drip of one report per second. + */ + if (nr_shown == 60) { + if (time_before(jiffies, resume)) { + nr_unshown++; + return; + } + if (nr_unshown) { + printk(KERN_ALERT + "BUG: Bad page map: %lu messages suppressed\n", + nr_unshown); + nr_unshown = 0; + } + nr_shown = 0; + } + if (nr_shown++ == 0) + resume = jiffies + 60 * HZ; + + mapping = vma->vm_file ? vma->vm_file->f_mapping : NULL; + index = linear_page_index(vma, addr); + + printk(KERN_ALERT + "BUG: Bad page map in process %s pte:%08llx pmd:%08llx\n", + current->comm, + (long long)pte_val(pte), (long long)pmd_val(*pmd)); + if (page) { + printk(KERN_ALERT + "page:%p flags:%p count:%d mapcount:%d mapping:%p index:%lx\n", + page, (void *)page->flags, page_count(page), + page_mapcount(page), page->mapping, page->index); + } + printk(KERN_ALERT + "addr:%p vm_flags:%08lx anon_vma:%p mapping:%p index:%lx\n", + (void *)addr, vma->vm_flags, vma->anon_vma, mapping, index); + /* + * Choose text because data symbols depend on CONFIG_KALLSYMS_ALL=y + */ + if (vma->vm_ops) + print_symbol(KERN_ALERT "vma->vm_ops->fault: %s\n", + (unsigned long)vma->vm_ops->fault); + if (vma->vm_file && vma->vm_file->f_op) + print_symbol(KERN_ALERT "vma->vm_file->f_op->mmap: %s\n", + (unsigned long)vma->vm_file->f_op->mmap); + dump_stack(); + add_taint(TAINT_BAD_PAGE); +} + +static inline int is_cow_mapping(unsigned int flags) +{ + return (flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE; +} + +#ifndef is_zero_pfn +static inline int is_zero_pfn(unsigned long pfn) +{ + return pfn == zero_pfn; +} +#endif + +#ifndef my_zero_pfn +static inline unsigned long my_zero_pfn(unsigned long addr) +{ + return zero_pfn; +} +#endif + +/* + * vm_normal_page -- This function gets the "struct page" associated with a pte. + * + * "Special" mappings do not wish to be associated with a "struct page" (either + * it doesn't exist, or it exists but they don't want to touch it). In this + * case, NULL is returned here. "Normal" mappings do have a struct page. + * + * There are 2 broad cases. Firstly, an architecture may define a pte_special() + * pte bit, in which case this function is trivial. Secondly, an architecture + * may not have a spare pte bit, which requires a more complicated scheme, + * described below. + * + * A raw VM_PFNMAP mapping (ie. one that is not COWed) is always considered a + * special mapping (even if there are underlying and valid "struct pages"). + * COWed pages of a VM_PFNMAP are always normal. + * + * The way we recognize COWed pages within VM_PFNMAP mappings is through the + * rules set up by "remap_pfn_range()": the vma will have the VM_PFNMAP bit + * set, and the vm_pgoff will point to the first PFN mapped: thus every special + * mapping will always honor the rule + * + * pfn_of_page == vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT) + * + * And for normal mappings this is false. + * + * This restricts such mappings to be a linear translation from virtual address + * to pfn. 
To get around this restriction, we allow arbitrary mappings so long + * as the vma is not a COW mapping; in that case, we know that all ptes are + * special (because none can have been COWed). + * + * + * In order to support COW of arbitrary special mappings, we have VM_MIXEDMAP. + * + * VM_MIXEDMAP mappings can likewise contain memory with or without "struct + * page" backing, however the difference is that _all_ pages with a struct + * page (that is, those where pfn_valid is true) are refcounted and considered + * normal pages by the VM. The disadvantage is that pages are refcounted + * (which can be slower and simply not an option for some PFNMAP users). The + * advantage is that we don't have to follow the strict linearity rule of + * PFNMAP mappings in order to support COWable mappings. + * + */ +#ifdef __HAVE_ARCH_PTE_SPECIAL +# define HAVE_PTE_SPECIAL 1 +#else +# define HAVE_PTE_SPECIAL 0 +#endif +struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr, + pte_t pte) +{ + unsigned long pfn = pte_pfn(pte); + + if (HAVE_PTE_SPECIAL) { + if (likely(!pte_special(pte))) + goto check_pfn; + if (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP)) + return NULL; + if (!is_zero_pfn(pfn)) + print_bad_pte(vma, addr, pte, NULL); + return NULL; + } + + /* !HAVE_PTE_SPECIAL case follows: */ + + if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) { + if (vma->vm_flags & VM_MIXEDMAP) { + if (!pfn_valid(pfn)) + return NULL; + goto out; + } else { + unsigned long off; + off = (addr - vma->vm_start) >> PAGE_SHIFT; + if (pfn == vma->vm_pgoff + off) + return NULL; + if (!is_cow_mapping(vma->vm_flags)) + return NULL; + } + } + + if (is_zero_pfn(pfn)) + return NULL; +check_pfn: + if (unlikely(pfn > highest_memmap_pfn)) { + print_bad_pte(vma, addr, pte, NULL); + return NULL; + } + + /* + * NOTE! We still have PageReserved() pages in the page tables. + * eg. VDSO mappings can cause them to exist. + */ +out: + return pfn_to_page(pfn); +} + +/* + * copy one vm_area from one task to the other. Assumes the page tables + * already present in the new task to be cleared in the whole range + * covered by this vma. + */ + +static inline void +copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm, + pte_t *dst_pte, pte_t *src_pte, struct vm_area_struct *vma, + unsigned long addr, int *rss) +{ + unsigned long vm_flags = vma->vm_flags; + pte_t pte = *src_pte; + struct page *page; + + /* pte contains position in swap or file, so copy. */ + if (unlikely(!pte_present(pte))) { + if (!pte_file(pte)) { + swp_entry_t entry = pte_to_swp_entry(pte); + + swap_duplicate(entry); + /* make sure dst_mm is on swapoff's mmlist. */ + if (unlikely(list_empty(&dst_mm->mmlist))) { + spin_lock(&mmlist_lock); + if (list_empty(&dst_mm->mmlist)) + list_add(&dst_mm->mmlist, + &src_mm->mmlist); + spin_unlock(&mmlist_lock); + } + if (is_write_migration_entry(entry) && + is_cow_mapping(vm_flags)) { + /* + * COW mappings require pages in both parent + * and child to be set to read. 
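
The write protection that copy_one_pte() applies to COW mappings is what
userspace observes as copy-on-write across fork(): parent and child
share read-only pages until one of them writes, at which point
do_wp_page() gives the writer its own copy. A minimal demonstration,
assuming Linux/POSIX:

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED)
		return 1;
	strcpy(p, "parent");

	pid_t pid = fork();
	if (pid == 0) {
		strcpy(p, "child");	/* write fault; the page is copied */
		_exit(0);
	}
	waitpid(pid, NULL, 0);
	printf("parent still sees: %s\n", p);	/* prints "parent" */
	return 0;
}
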
+ */ + make_migration_entry_read(&entry); + pte = swp_entry_to_pte(entry); + set_pte_at(src_mm, addr, src_pte, pte); + } + } + goto out_set_pte; + } + + /* + * If it's a COW mapping, write protect it both + * in the parent and the child + */ + if (is_cow_mapping(vm_flags)) { + ptep_set_wrprotect(src_mm, addr, src_pte); + pte = pte_wrprotect(pte); + } + + /* + * If it's a shared mapping, mark it clean in + * the child + */ + if (vm_flags & VM_SHARED) + pte = pte_mkclean(pte); + pte = pte_mkold(pte); + + page = vm_normal_page(vma, addr, pte); + if (page) { + get_page(page); + page_dup_rmap(page); + rss[PageAnon(page)]++; + } + +out_set_pte: + set_pte_at(dst_mm, addr, dst_pte, pte); +} + +static int copy_pte_range(struct mm_struct *dst_mm, struct mm_struct *src_mm, + pmd_t *dst_pmd, pmd_t *src_pmd, struct vm_area_struct *vma, + unsigned long addr, unsigned long end) +{ + pte_t *orig_src_pte, *orig_dst_pte; + pte_t *src_pte, *dst_pte; + spinlock_t *src_ptl, *dst_ptl; + int progress = 0; + int rss[2]; + +again: + rss[1] = rss[0] = 0; + dst_pte = pte_alloc_map_lock(dst_mm, dst_pmd, addr, &dst_ptl); + if (!dst_pte) + return -ENOMEM; + src_pte = pte_offset_map_nested(src_pmd, addr); + src_ptl = pte_lockptr(src_mm, src_pmd); + spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING); + orig_src_pte = src_pte; + orig_dst_pte = dst_pte; + arch_enter_lazy_mmu_mode(); + + do { + /* + * We are holding two locks at this point - either of them + * could generate latencies in another task on another CPU. + */ + if (progress >= 32) { + progress = 0; + if (need_resched() || + spin_needbreak(src_ptl) || spin_needbreak(dst_ptl)) + break; + } + if (pte_none(*src_pte)) { + progress++; + continue; + } + copy_one_pte(dst_mm, src_mm, dst_pte, src_pte, vma, addr, rss); + progress += 8; + } while (dst_pte++, src_pte++, addr += PAGE_SIZE, addr != end); + + arch_leave_lazy_mmu_mode(); + spin_unlock(src_ptl); + pte_unmap_nested(orig_src_pte); + add_mm_rss(dst_mm, rss[0], rss[1]); + pte_unmap_unlock(orig_dst_pte, dst_ptl); + cond_resched(); + if (addr != end) + goto again; + return 0; +} + +static inline int copy_pmd_range(struct mm_struct *dst_mm, struct mm_struct *src_mm, + pud_t *dst_pud, pud_t *src_pud, struct vm_area_struct *vma, + unsigned long addr, unsigned long end) +{ + pmd_t *src_pmd, *dst_pmd; + unsigned long next; + + dst_pmd = pmd_alloc(dst_mm, dst_pud, addr); + if (!dst_pmd) + return -ENOMEM; + src_pmd = pmd_offset(src_pud, addr); + do { + next = pmd_addr_end(addr, end); + if (pmd_none_or_clear_bad(src_pmd)) + continue; + if (copy_pte_range(dst_mm, src_mm, dst_pmd, src_pmd, + vma, addr, next)) + return -ENOMEM; + } while (dst_pmd++, src_pmd++, addr = next, addr != end); + return 0; +} + +static inline int copy_pud_range(struct mm_struct *dst_mm, struct mm_struct *src_mm, + pgd_t *dst_pgd, pgd_t *src_pgd, struct vm_area_struct *vma, + unsigned long addr, unsigned long end) +{ + pud_t *src_pud, *dst_pud; + unsigned long next; + + dst_pud = pud_alloc(dst_mm, dst_pgd, addr); + if (!dst_pud) + return -ENOMEM; + src_pud = pud_offset(src_pgd, addr); + do { + next = pud_addr_end(addr, end); + if (pud_none_or_clear_bad(src_pud)) + continue; + if (copy_pmd_range(dst_mm, src_mm, dst_pud, src_pud, + vma, addr, next)) + return -ENOMEM; + } while (dst_pud++, src_pud++, addr = next, addr != end); + return 0; +} + +int copy_page_range(struct mm_struct *dst_mm, struct mm_struct *src_mm, + struct vm_area_struct *vma) +{ + pgd_t *src_pgd, *dst_pgd; + unsigned long next; + unsigned long addr = vma->vm_start; + unsigned long end = 
vma->vm_end; + int ret; + + /* + * Don't copy ptes where a page fault will fill them correctly. + * Fork becomes much lighter when there are big shared or private + * readonly mappings. The tradeoff is that copy_page_range is more + * efficient than faulting. + */ + if (!(vma->vm_flags & (VM_HUGETLB|VM_NONLINEAR|VM_PFNMAP|VM_INSERTPAGE))) { + if (!vma->anon_vma) + return 0; + } + + if (is_vm_hugetlb_page(vma)) + return copy_hugetlb_page_range(dst_mm, src_mm, vma); + + if (unlikely(is_pfn_mapping(vma))) { + /* + * We do not free on error cases below as remove_vma + * gets called on error from higher level routine + */ + ret = track_pfn_vma_copy(vma); + if (ret) + return ret; + } + + /* + * We need to invalidate the secondary MMU mappings only when + * there could be a permission downgrade on the ptes of the + * parent mm. And a permission downgrade will only happen if + * is_cow_mapping() returns true. + */ + if (is_cow_mapping(vma->vm_flags)) + mmu_notifier_invalidate_range_start(src_mm, addr, end); + + ret = 0; + dst_pgd = pgd_offset(dst_mm, addr); + src_pgd = pgd_offset(src_mm, addr); + do { + next = pgd_addr_end(addr, end); + if (pgd_none_or_clear_bad(src_pgd)) + continue; + if (unlikely(copy_pud_range(dst_mm, src_mm, dst_pgd, src_pgd, + vma, addr, next))) { + ret = -ENOMEM; + break; + } + } while (dst_pgd++, src_pgd++, addr = next, addr != end); + + if (is_cow_mapping(vma->vm_flags)) + mmu_notifier_invalidate_range_end(src_mm, + vma->vm_start, end); + return ret; +} + +static unsigned long zap_pte_range(struct mmu_gather *tlb, + struct vm_area_struct *vma, pmd_t *pmd, + unsigned long addr, unsigned long end, + long *zap_work, struct zap_details *details) +{ + struct mm_struct *mm = tlb->mm; + pte_t *pte; + spinlock_t *ptl; + int file_rss = 0; + int anon_rss = 0; + + pte = pte_offset_map_lock(mm, pmd, addr, &ptl); + arch_enter_lazy_mmu_mode(); + do { + pte_t ptent = *pte; + if (pte_none(ptent)) { + (*zap_work)--; + continue; + } + + (*zap_work) -= PAGE_SIZE; + + if (pte_present(ptent)) { + struct page *page; + + page = vm_normal_page(vma, addr, ptent); + if (unlikely(details) && page) { + /* + * unmap_shared_mapping_pages() wants to + * invalidate cache without truncating: + * unmap shared but keep private pages. + */ + if (details->check_mapping && + details->check_mapping != page->mapping) + continue; + /* + * Each page->index must be checked when + * invalidating or truncating nonlinear. + */ + if (details->nonlinear_vma && + (page->index < details->first_index || + page->index > details->last_index)) + continue; + } + ptent = ptep_get_and_clear_full(mm, addr, pte, + tlb->fullmm); + tlb_remove_tlb_entry(tlb, pte, addr); + if (unlikely(!page)) + continue; + if (unlikely(details) && details->nonlinear_vma + && linear_page_index(details->nonlinear_vma, + addr) != page->index) + set_pte_at(mm, addr, pte, + pgoff_to_pte(page->index)); + if (PageAnon(page)) + anon_rss--; + else { + if (pte_dirty(ptent)) + set_page_dirty(page); + if (pte_young(ptent) && + likely(!VM_SequentialReadHint(vma))) + mark_page_accessed(page); + file_rss--; + } + page_remove_rmap(page); + if (unlikely(page_mapcount(page) < 0)) + print_bad_pte(vma, addr, ptent, page); + tlb_remove_page(tlb, page); + continue; + } + /* + * If details->check_mapping, we leave swap entries; + * if details->nonlinear_vma, we leave file entries. 
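
The zap path above can be driven directly from userspace:
madvise(MADV_DONTNEED) on a private anonymous range discards its ptes,
and the next touch faults in fresh zero-filled pages. A small sketch:

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED)
		return 1;
	strcpy(p, "hello");
	madvise(p, 4096, MADV_DONTNEED);	/* ptes are zapped */
	printf("after MADV_DONTNEED: %d\n", p[0]);	/* 0: zero page */
	return 0;
}
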
+ */ + if (unlikely(details)) + continue; + if (pte_file(ptent)) { + if (unlikely(!(vma->vm_flags & VM_NONLINEAR))) + print_bad_pte(vma, addr, ptent, NULL); + } else if + (unlikely(!free_swap_and_cache(pte_to_swp_entry(ptent)))) + print_bad_pte(vma, addr, ptent, NULL); + pte_clear_not_present_full(mm, addr, pte, tlb->fullmm); + } while (pte++, addr += PAGE_SIZE, (addr != end && *zap_work > 0)); + + add_mm_rss(mm, file_rss, anon_rss); + arch_leave_lazy_mmu_mode(); + pte_unmap_unlock(pte - 1, ptl); + + return addr; +} + +static inline unsigned long zap_pmd_range(struct mmu_gather *tlb, + struct vm_area_struct *vma, pud_t *pud, + unsigned long addr, unsigned long end, + long *zap_work, struct zap_details *details) +{ + pmd_t *pmd; + unsigned long next; + + pmd = pmd_offset(pud, addr); + do { + next = pmd_addr_end(addr, end); + if (pmd_none_or_clear_bad(pmd)) { + (*zap_work)--; + continue; + } + next = zap_pte_range(tlb, vma, pmd, addr, next, + zap_work, details); + } while (pmd++, addr = next, (addr != end && *zap_work > 0)); + + return addr; +} + +static inline unsigned long zap_pud_range(struct mmu_gather *tlb, + struct vm_area_struct *vma, pgd_t *pgd, + unsigned long addr, unsigned long end, + long *zap_work, struct zap_details *details) +{ + pud_t *pud; + unsigned long next; + + pud = pud_offset(pgd, addr); + do { + next = pud_addr_end(addr, end); + if (pud_none_or_clear_bad(pud)) { + (*zap_work)--; + continue; + } + next = zap_pmd_range(tlb, vma, pud, addr, next, + zap_work, details); + } while (pud++, addr = next, (addr != end && *zap_work > 0)); + + return addr; +} + +static unsigned long unmap_page_range(struct mmu_gather *tlb, + struct vm_area_struct *vma, + unsigned long addr, unsigned long end, + long *zap_work, struct zap_details *details) +{ + pgd_t *pgd; + unsigned long next; + + if (details && !details->check_mapping && !details->nonlinear_vma) + details = NULL; + + BUG_ON(addr >= end); + tlb_start_vma(tlb, vma); + pgd = pgd_offset(vma->vm_mm, addr); + do { + next = pgd_addr_end(addr, end); + if (pgd_none_or_clear_bad(pgd)) { + (*zap_work)--; + continue; + } + next = zap_pud_range(tlb, vma, pgd, addr, next, + zap_work, details); + } while (pgd++, addr = next, (addr != end && *zap_work > 0)); + tlb_end_vma(tlb, vma); + + return addr; +} + +#ifdef CONFIG_PREEMPT +# define ZAP_BLOCK_SIZE (8 * PAGE_SIZE) +#else +/* No preempt: go for improved straight-line efficiency */ +# define ZAP_BLOCK_SIZE (1024 * PAGE_SIZE) +#endif + +/** + * unmap_vmas - unmap a range of memory covered by a list of vma's + * @tlbp: address of the caller's struct mmu_gather + * @vma: the starting vma + * @start_addr: virtual address at which to start unmapping + * @end_addr: virtual address at which to end unmapping + * @nr_accounted: Place number of unmapped pages in vm-accountable vma's here + * @details: details of nonlinear truncation or shared cache invalidation + * + * Returns the end address of the unmapping (restart addr if interrupted). + * + * Unmap all pages in the vma list. + * + * We aim to not hold locks for too long (for scheduling latency reasons). + * So zap pages in ZAP_BLOCK_SIZE bytecounts. This means we need to + * return the ending mmu_gather to the caller. + * + * Only addresses between `start' and `end' will be unmapped. + * + * The VMA list must be sorted in ascending virtual address order. + * + * unmap_vmas() assumes that the caller will flush the whole unmapped address + * range after unmap_vmas() returns. 
So the only responsibility here is to + * ensure that any thus-far unmapped pages are flushed before unmap_vmas() + * drops the lock and schedules. + */ +unsigned long unmap_vmas(struct mmu_gather **tlbp, + struct vm_area_struct *vma, unsigned long start_addr, + unsigned long end_addr, unsigned long *nr_accounted, + struct zap_details *details) +{ + long zap_work = ZAP_BLOCK_SIZE; + unsigned long tlb_start = 0; /* For tlb_finish_mmu */ + int tlb_start_valid = 0; + unsigned long start = start_addr; + spinlock_t *i_mmap_lock = details? details->i_mmap_lock: NULL; + int fullmm = (*tlbp)->fullmm; + struct mm_struct *mm = vma->vm_mm; + + mmu_notifier_invalidate_range_start(mm, start_addr, end_addr); + for ( ; vma && vma->vm_start < end_addr; vma = vma->vm_next) { + unsigned long end; + + start = max(vma->vm_start, start_addr); + if (start >= vma->vm_end) + continue; + end = min(vma->vm_end, end_addr); + if (end <= vma->vm_start) + continue; + + if (vma->vm_flags & VM_ACCOUNT) + *nr_accounted += (end - start) >> PAGE_SHIFT; + + if (unlikely(is_pfn_mapping(vma))) + untrack_pfn_vma(vma, 0, 0); + + while (start != end) { + if (!tlb_start_valid) { + tlb_start = start; + tlb_start_valid = 1; + } + + if (unlikely(is_vm_hugetlb_page(vma))) { + /* + * It is undesirable to test vma->vm_file as it + * should be non-null for valid hugetlb area. + * However, vm_file will be NULL in the error + * cleanup path of do_mmap_pgoff. When + * hugetlbfs ->mmap method fails, + * do_mmap_pgoff() nullifies vma->vm_file + * before calling this function to clean up. + * Since no pte has actually been setup, it is + * safe to do nothing in this case. + */ + if (vma->vm_file) { + unmap_hugepage_range(vma, start, end, NULL); + zap_work -= (end - start) / + pages_per_huge_page(hstate_vma(vma)); + } + + start = end; + } else + start = unmap_page_range(*tlbp, vma, + start, end, &zap_work, details); + + if (zap_work > 0) { + BUG_ON(start != end); + break; + } + + tlb_finish_mmu(*tlbp, tlb_start, start); + + if (need_resched() || + (i_mmap_lock && spin_needbreak(i_mmap_lock))) { + if (i_mmap_lock) { + *tlbp = NULL; + goto out; + } + cond_resched(); + } + + *tlbp = tlb_gather_mmu(vma->vm_mm, fullmm); + tlb_start_valid = 0; + zap_work = ZAP_BLOCK_SIZE; + } + } +out: + mmu_notifier_invalidate_range_end(mm, start_addr, end_addr); + return start; /* which is now the end (or restart) address */ +} + +/** + * zap_page_range - remove user pages in a given range + * @vma: vm_area_struct holding the applicable pages + * @address: starting address of pages to zap + * @size: number of bytes to zap + * @details: details of nonlinear truncation or shared cache invalidation + */ +unsigned long zap_page_range(struct vm_area_struct *vma, unsigned long address, + unsigned long size, struct zap_details *details) +{ + struct mm_struct *mm = vma->vm_mm; + struct mmu_gather *tlb; + unsigned long end = address + size; + unsigned long nr_accounted = 0; + + lru_add_drain(); + tlb = tlb_gather_mmu(mm, 0); + update_hiwater_rss(mm); + end = unmap_vmas(&tlb, vma, address, end, &nr_accounted, details); + if (tlb) + tlb_finish_mmu(tlb, address, end); + return end; +} + +/** + * zap_vma_ptes - remove ptes mapping the vma + * @vma: vm_area_struct holding ptes to be zapped + * @address: starting address of pages to zap + * @size: number of bytes to zap + * + * This function only unmaps ptes assigned to VM_PFNMAP vmas. + * + * The entire address range must be fully contained within the vma. + * + * Returns 0 if successful. 
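
unmap_vmas() above bounds scheduling latency by working in
ZAP_BLOCK_SIZE chunks and offering to reschedule between them rather
than holding locks for the whole range. The same structure in a
standalone program, with sched_yield() as a rough userspace stand-in for
cond_resched():

#include <sched.h>
#include <stdlib.h>
#include <string.h>

#define BLOCK (1024 * 4096)	/* cf. ZAP_BLOCK_SIZE */

static void clear_in_blocks(char *buf, size_t len)
{
	while (len) {
		size_t n = len < BLOCK ? len : BLOCK;

		memset(buf, 0, n);
		buf += n;
		len -= n;
		sched_yield();	/* give others a chance between blocks */
	}
}

int main(void)
{
	size_t len = 64UL * 1024 * 1024;
	char *buf = malloc(len);

	if (buf)
		clear_in_blocks(buf, len);
	free(buf);
	return 0;
}
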
+ */ +int zap_vma_ptes(struct vm_area_struct *vma, unsigned long address, + unsigned long size) +{ + if (address < vma->vm_start || address + size > vma->vm_end || + !(vma->vm_flags & VM_PFNMAP)) + return -1; + zap_page_range(vma, address, size, NULL); + return 0; +} +EXPORT_SYMBOL_GPL(zap_vma_ptes); + +/* + * Do a quick page-table lookup for a single page. + */ +struct page *follow_page(struct vm_area_struct *vma, unsigned long address, + unsigned int flags) +{ + pgd_t *pgd; + pud_t *pud; + pmd_t *pmd; + pte_t *ptep, pte; + spinlock_t *ptl; + struct page *page; + struct mm_struct *mm = vma->vm_mm; + + page = follow_huge_addr(mm, address, flags & FOLL_WRITE); + if (!IS_ERR(page)) { + BUG_ON(flags & FOLL_GET); + goto out; + } + + page = NULL; + pgd = pgd_offset(mm, address); + if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd))) + goto no_page_table; + + pud = pud_offset(pgd, address); + if (pud_none(*pud)) + goto no_page_table; + if (pud_huge(*pud)) { + BUG_ON(flags & FOLL_GET); + page = follow_huge_pud(mm, address, pud, flags & FOLL_WRITE); + goto out; + } + if (unlikely(pud_bad(*pud))) + goto no_page_table; + + pmd = pmd_offset(pud, address); + if (pmd_none(*pmd)) + goto no_page_table; + if (pmd_huge(*pmd)) { + BUG_ON(flags & FOLL_GET); + page = follow_huge_pmd(mm, address, pmd, flags & FOLL_WRITE); + goto out; + } + if (unlikely(pmd_bad(*pmd))) + goto no_page_table; + + ptep = pte_offset_map_lock(mm, pmd, address, &ptl); + + pte = *ptep; + if (!pte_present(pte)) + goto no_page; + if ((flags & FOLL_WRITE) && !pte_write(pte)) + goto unlock; + + page = vm_normal_page(vma, address, pte); + if (unlikely(!page)) { + if ((flags & FOLL_DUMP) || + !is_zero_pfn(pte_pfn(pte))) + goto bad_page; + page = pte_page(pte); + } + + if (flags & FOLL_GET) + get_page(page); + if (flags & FOLL_TOUCH) { + if ((flags & FOLL_WRITE) && + !pte_dirty(pte) && !PageDirty(page)) + set_page_dirty(page); + /* + * pte_mkyoung() would be more correct here, but atomic care + * is needed to avoid losing the dirty bit: it is easier to use + * mark_page_accessed(). + */ + mark_page_accessed(page); + } +unlock: + pte_unmap_unlock(ptep, ptl); +out: + return page; + +bad_page: + pte_unmap_unlock(ptep, ptl); + return ERR_PTR(-EFAULT); + +no_page: + pte_unmap_unlock(ptep, ptl); + if (!pte_none(pte)) + return page; + +no_page_table: + /* + * When core dumping an enormous anonymous area that nobody + * has touched so far, we don't want to allocate unnecessary pages or + * page tables. Return error instead of NULL to skip handle_mm_fault, + * then get_dump_page() will return NULL to leave a hole in the dump. + * But we can only make this optimization where a hole would surely + * be zero-filled if handle_mm_fault() actually did handle it. + */ + if ((flags & FOLL_DUMP) && + (!vma->vm_ops || !vma->vm_ops->fault)) + return ERR_PTR(-EFAULT); + return page; +} +EXPORT_SYMBOL_GPL(follow_page); + +int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm, + unsigned long start, int nr_pages, unsigned int gup_flags, + struct page **pages, struct vm_area_struct **vmas) +{ + int i; + unsigned long vm_flags; + + if (nr_pages <= 0) + return 0; + + VM_BUG_ON(!!pages != !!(gup_flags & FOLL_GET)); + + /* + * Require read or write permissions. + * If FOLL_FORCE is set, we only require the "MAY" flags. + */ + vm_flags = (gup_flags & FOLL_WRITE) ? + (VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD); + vm_flags &= (gup_flags & FOLL_FORCE) ? 
+ (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE); + i = 0; + + do { + struct vm_area_struct *vma; + + vma = find_extend_vma(mm, start); + if (!vma && in_gate_area(tsk, start)) { + unsigned long pg = start & PAGE_MASK; + struct vm_area_struct *gate_vma = get_gate_vma(tsk); + pgd_t *pgd; + pud_t *pud; + pmd_t *pmd; + pte_t *pte; + + /* user gate pages are read-only */ + if (gup_flags & FOLL_WRITE) + return i ? : -EFAULT; + if (pg > TASK_SIZE) + pgd = pgd_offset_k(pg); + else + pgd = pgd_offset_gate(mm, pg); + BUG_ON(pgd_none(*pgd)); + pud = pud_offset(pgd, pg); + BUG_ON(pud_none(*pud)); + pmd = pmd_offset(pud, pg); + if (pmd_none(*pmd)) + return i ? : -EFAULT; + pte = pte_offset_map(pmd, pg); + if (pte_none(*pte)) { + pte_unmap(pte); + return i ? : -EFAULT; + } + if (pages) { + struct page *page; + + page = vm_normal_page(gate_vma, start, *pte); + if (!page) { + if (!(gup_flags & FOLL_DUMP) && + is_zero_pfn(pte_pfn(*pte))) + page = pte_page(*pte); + else { + pte_unmap(pte); + return i ? : -EFAULT; + } + } + pages[i] = page; + get_page(page); + } + pte_unmap(pte); + if (vmas) + vmas[i] = gate_vma; + i++; + start += PAGE_SIZE; + nr_pages--; + continue; + } + + if (!vma || + (vma->vm_flags & (VM_IO | VM_PFNMAP)) || + !(vm_flags & vma->vm_flags)) + return i ? : -EFAULT; + + if (is_vm_hugetlb_page(vma)) { + i = follow_hugetlb_page(mm, vma, pages, vmas, + &start, &nr_pages, i, gup_flags); + continue; + } + + do { + struct page *page; + unsigned int foll_flags = gup_flags; + + /* + * If we have a pending SIGKILL, don't keep faulting + * pages and potentially allocating memory. + */ + if (unlikely(fatal_signal_pending(current))) + return i ? i : -ERESTARTSYS; + + cond_resched(); + while (!(page = follow_page(vma, start, foll_flags))) { + int ret; + + ret = handle_mm_fault(mm, vma, start, + (foll_flags & FOLL_WRITE) ? + FAULT_FLAG_WRITE : 0); + + if (ret & VM_FAULT_ERROR) { + if (ret & VM_FAULT_OOM) + return i ? i : -ENOMEM; + if (ret & + (VM_FAULT_HWPOISON|VM_FAULT_SIGBUS)) + return i ? i : -EFAULT; + BUG(); + } + if (ret & VM_FAULT_MAJOR) + tsk->maj_flt++; + else + tsk->min_flt++; + + /* + * The VM_FAULT_WRITE bit tells us that + * do_wp_page has broken COW when necessary, + * even if maybe_mkwrite decided not to set + * pte_write. We can thus safely do subsequent + * page lookups as if they were reads. But only + * do so when looping for pte_write is futile: + * in some cases userspace may also be wanting + * to write to the gotten user page, which a + * read fault here might prevent (a readonly + * page might get reCOWed by userspace write). + */ + if ((ret & VM_FAULT_WRITE) && + !(vma->vm_flags & VM_WRITE)) + foll_flags &= ~FOLL_WRITE; + + cond_resched(); + } + if (IS_ERR(page)) + return i ? i : PTR_ERR(page); + if (pages) { + pages[i] = page; + + flush_anon_page(vma, page, start); + flush_dcache_page(page); + } + if (vmas) + vmas[i] = vma; + i++; + start += PAGE_SIZE; + nr_pages--; + } while (nr_pages && start < vma->vm_end); + } while (nr_pages); + return i; +} + +/** + * get_user_pages() - pin user pages in memory + * @tsk: task_struct of target task + * @mm: mm_struct of target mm + * @start: starting user address + * @nr_pages: number of pages from start to pin + * @write: whether pages will be written to by the caller + * @force: whether to force write access even if user mapping is + * readonly. This will result in the page being COWed even + * in MAP_SHARED mappings. You do not want this. + * @pages: array that receives pointers to the pages pinned. 
+ * Should be at least nr_pages long. Or NULL, if caller + * only intends to ensure the pages are faulted in. + * @vmas: array of pointers to vmas corresponding to each page. + * Or NULL if the caller does not require them. + * + * Returns number of pages pinned. This may be fewer than the number + * requested. If nr_pages is 0 or negative, returns 0. If no pages + * were pinned, returns -errno. Each page returned must be released + * with a put_page() call when it is finished with. vmas will only + * remain valid while mmap_sem is held. + * + * Must be called with mmap_sem held for read or write. + * + * get_user_pages walks a process's page tables and takes a reference to + * each struct page that each user address corresponds to at a given + * instant. That is, it takes the page that would be accessed if a user + * thread accesses the given user virtual address at that instant. + * + * This does not guarantee that the page exists in the user mappings when + * get_user_pages returns, and there may even be a completely different + * page there in some cases (eg. if mmapped pagecache has been invalidated + * and subsequently re faulted). However it does guarantee that the page + * won't be freed completely. And mostly callers simply care that the page + * contains data that was valid *at some point in time*. Typically, an IO + * or similar operation cannot guarantee anything stronger anyway because + * locks can't be held over the syscall boundary. + * + * If write=0, the page must not be written to. If the page is written to, + * set_page_dirty (or set_page_dirty_lock, as appropriate) must be called + * after the page is finished with, and before put_page is called. + * + * get_user_pages is typically used for fewer-copy IO operations, to get a + * handle on the memory by some means other than accesses via the user virtual + * addresses. The pages may be submitted for DMA to devices or accessed via + * their kernel linear mapping (via the kmap APIs). Care should be taken to + * use the correct cache flushing APIs. + * + * See also get_user_pages_fast, for performance critical applications. + */ +int get_user_pages(struct task_struct *tsk, struct mm_struct *mm, + unsigned long start, int nr_pages, int write, int force, + struct page **pages, struct vm_area_struct **vmas) +{ + int flags = FOLL_TOUCH; + + if (pages) + flags |= FOLL_GET; + if (write) + flags |= FOLL_WRITE; + if (force) + flags |= FOLL_FORCE; + + return __get_user_pages(tsk, mm, start, nr_pages, flags, pages, vmas); +} +EXPORT_SYMBOL(get_user_pages); + +/** + * get_dump_page() - pin user page in memory while writing it to core dump + * @addr: user address + * + * Returns struct page pointer of user page pinned for dump, + * to be freed afterwards by page_cache_release() or put_page(). + * + * Returns NULL on any kind of failure - a hole must then be inserted into + * the corefile, to preserve alignment with its headers; and also returns + * NULL wherever the ZERO_PAGE, or an anonymous pte_none, has been found - + * allowing a hole to be left in the corefile to save diskspace. + * + * Called without mmap_sem, but after all other threads have been killed. 
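
A common way userspace ends up in get_user_pages() is direct I/O, where
the kernel pins the user buffer for the duration of the transfer. A
minimal sketch; the file path is a placeholder, and O_DIRECT requires a
filesystem that supports it plus suitably aligned buffer, offset and
length:

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(void)
{
	void *buf;
	ssize_t n;
	int fd = open("/tmp/testfile", O_RDONLY | O_DIRECT);

	if (fd < 0)
		return 1;
	if (posix_memalign(&buf, 4096, 4096))	/* O_DIRECT wants alignment */
		return 1;
	n = read(fd, buf, 4096);	/* buffer pages are pinned for the read */
	printf("read %zd bytes\n", n);
	free(buf);
	close(fd);
	return 0;
}
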
+ */ +#ifdef CONFIG_ELF_CORE +struct page *get_dump_page(unsigned long addr) +{ + struct vm_area_struct *vma; + struct page *page; + + if (__get_user_pages(current, current->mm, addr, 1, + FOLL_FORCE | FOLL_DUMP | FOLL_GET, &page, &vma) < 1) + return NULL; + flush_cache_page(vma, addr, page_to_pfn(page)); + return page; +} +#endif /* CONFIG_ELF_CORE */ + +pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr, + spinlock_t **ptl) +{ + pgd_t * pgd = pgd_offset(mm, addr); + pud_t * pud = pud_alloc(mm, pgd, addr); + if (pud) { + pmd_t * pmd = pmd_alloc(mm, pud, addr); + if (pmd) + return pte_alloc_map_lock(mm, pmd, addr, ptl); + } + return NULL; +} + +/* + * This is the old fallback for page remapping. + * + * For historical reasons, it only allows reserved pages. Only + * old drivers should use this, and they needed to mark their + * pages reserved for the old functions anyway. + */ +static int insert_page(struct vm_area_struct *vma, unsigned long addr, + struct page *page, pgprot_t prot) +{ + struct mm_struct *mm = vma->vm_mm; + int retval; + pte_t *pte; + spinlock_t *ptl; + + retval = -EINVAL; + if (PageAnon(page)) + goto out; + retval = -ENOMEM; + flush_dcache_page(page); + pte = get_locked_pte(mm, addr, &ptl); + if (!pte) + goto out; + retval = -EBUSY; + if (!pte_none(*pte)) + goto out_unlock; + + /* Ok, finally just insert the thing.. */ + get_page(page); + inc_mm_counter(mm, file_rss); + page_add_file_rmap(page); + set_pte_at(mm, addr, pte, mk_pte(page, prot)); + + retval = 0; + pte_unmap_unlock(pte, ptl); + return retval; +out_unlock: + pte_unmap_unlock(pte, ptl); +out: + return retval; +} + +/** + * vm_insert_page - insert single page into user vma + * @vma: user vma to map to + * @addr: target user address of this page + * @page: source kernel page + * + * This allows drivers to insert individual pages they've allocated + * into a user vma. + * + * The page has to be a nice clean _individual_ kernel allocation. + * If you allocate a compound page, you need to have marked it as + * such (__GFP_COMP), or manually just split the page up yourself + * (see split_page()). + * + * NOTE! Traditionally this was done with "remap_pfn_range()" which + * took an arbitrary page protection parameter. This doesn't allow + * that. Your vma protection will have to be set up correctly, which + * means that if you want a shared writable mapping, you'd better + * ask for a shared writable mapping! + * + * The page does not need to be reserved. + */ +int vm_insert_page(struct vm_area_struct *vma, unsigned long addr, + struct page *page) +{ + if (addr < vma->vm_start || addr >= vma->vm_end) + return -EFAULT; + if (!page_count(page)) + return -EINVAL; + vma->vm_flags |= VM_INSERTPAGE; + return insert_page(vma, addr, page, vma->vm_page_prot); +} +EXPORT_SYMBOL(vm_insert_page); + +static int insert_pfn(struct vm_area_struct *vma, unsigned long addr, + unsigned long pfn, pgprot_t prot) +{ + struct mm_struct *mm = vma->vm_mm; + int retval; + pte_t *pte, entry; + spinlock_t *ptl; + + retval = -ENOMEM; + pte = get_locked_pte(mm, addr, &ptl); + if (!pte) + goto out; + retval = -EBUSY; + if (!pte_none(*pte)) + goto out_unlock; + + /* Ok, finally just insert the thing.. */ + entry = pte_mkspecial(pfn_pte(pfn, prot)); + set_pte_at(mm, addr, pte, entry); + update_mmu_cache(vma, addr, entry); /* XXX: why not for insert_page? 
 */
+
+	retval = 0;
+out_unlock:
+	pte_unmap_unlock(pte, ptl);
+out:
+	return retval;
+}
+
+/**
+ * vm_insert_pfn - insert single pfn into user vma
+ * @vma: user vma to map to
+ * @addr: target user address of this page
+ * @pfn: source kernel pfn
+ *
+ * Similar to vm_insert_page, this allows drivers to insert individual pages
+ * they've allocated into a user vma. Same comments apply.
+ *
+ * This function should only be called from a vm_ops->fault handler, and
+ * in that case the handler should return NULL.
+ *
+ * vma cannot be a COW mapping.
+ *
+ * As this is called only for pages that do not currently exist, we
+ * do not need to flush old virtual caches or the TLB.
+ */
+int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
+			unsigned long pfn)
+{
+	int ret;
+	pgprot_t pgprot = vma->vm_page_prot;
+	/*
+	 * Technically, architectures with pte_special can avoid all these
+	 * restrictions (same for remap_pfn_range).  However we would like
+	 * consistency in testing and feature parity among all, so we should
+	 * try to keep these invariants in place for everybody.
+	 */
+	BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)));
+	BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) ==
+						(VM_PFNMAP|VM_MIXEDMAP));
+	BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));
+	BUG_ON((vma->vm_flags & VM_MIXEDMAP) && pfn_valid(pfn));
+
+	if (addr < vma->vm_start || addr >= vma->vm_end)
+		return -EFAULT;
+	if (track_pfn_vma_new(vma, &pgprot, pfn, PAGE_SIZE))
+		return -EINVAL;
+
+	ret = insert_pfn(vma, addr, pfn, pgprot);
+
+	if (ret)
+		untrack_pfn_vma(vma, pfn, PAGE_SIZE);
+
+	return ret;
+}
+EXPORT_SYMBOL(vm_insert_pfn);
+
+int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
+			unsigned long pfn)
+{
+	BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
+
+	if (addr < vma->vm_start || addr >= vma->vm_end)
+		return -EFAULT;
+
+	/*
+	 * If we don't have pte special, then we have to use the pfn_valid()
+	 * based VM_MIXEDMAP scheme (see vm_normal_page), and thus we *must*
+	 * refcount the page if pfn_valid is true (hence insert_page rather
+	 * than insert_pfn).  If a zero_pfn were inserted into a VM_MIXEDMAP
+	 * without pte special, it would there be refcounted as a normal page.
+	 */
+	if (!HAVE_PTE_SPECIAL && pfn_valid(pfn)) {
+		struct page *page;
+
+		page = pfn_to_page(pfn);
+		return insert_page(vma, addr, page, vma->vm_page_prot);
+	}
+	return insert_pfn(vma, addr, pfn, vma->vm_page_prot);
+}
+EXPORT_SYMBOL(vm_insert_mixed);
+
+/*
+ * maps a range of physical memory into the requested pages. the old
+ * mappings are removed.
any references to nonexistent pages results + * in null mappings (currently treated as "copy-on-access") + */ +static int remap_pte_range(struct mm_struct *mm, pmd_t *pmd, + unsigned long addr, unsigned long end, + unsigned long pfn, pgprot_t prot) +{ + pte_t *pte; + spinlock_t *ptl; + + pte = pte_alloc_map_lock(mm, pmd, addr, &ptl); + if (!pte) + return -ENOMEM; + arch_enter_lazy_mmu_mode(); + do { + BUG_ON(!pte_none(*pte)); + set_pte_at(mm, addr, pte, pte_mkspecial(pfn_pte(pfn, prot))); + pfn++; + } while (pte++, addr += PAGE_SIZE, addr != end); + arch_leave_lazy_mmu_mode(); + pte_unmap_unlock(pte - 1, ptl); + return 0; +} + +static inline int remap_pmd_range(struct mm_struct *mm, pud_t *pud, + unsigned long addr, unsigned long end, + unsigned long pfn, pgprot_t prot) +{ + pmd_t *pmd; + unsigned long next; + + pfn -= addr >> PAGE_SHIFT; + pmd = pmd_alloc(mm, pud, addr); + if (!pmd) + return -ENOMEM; + do { + next = pmd_addr_end(addr, end); + if (remap_pte_range(mm, pmd, addr, next, + pfn + (addr >> PAGE_SHIFT), prot)) + return -ENOMEM; + } while (pmd++, addr = next, addr != end); + return 0; +} + +static inline int remap_pud_range(struct mm_struct *mm, pgd_t *pgd, + unsigned long addr, unsigned long end, + unsigned long pfn, pgprot_t prot) +{ + pud_t *pud; + unsigned long next; + + pfn -= addr >> PAGE_SHIFT; + pud = pud_alloc(mm, pgd, addr); + if (!pud) + return -ENOMEM; + do { + next = pud_addr_end(addr, end); + if (remap_pmd_range(mm, pud, addr, next, + pfn + (addr >> PAGE_SHIFT), prot)) + return -ENOMEM; + } while (pud++, addr = next, addr != end); + return 0; +} + +/** + * remap_pfn_range - remap kernel memory to userspace + * @vma: user vma to map to + * @addr: target user address to start at + * @pfn: physical address of kernel memory + * @size: size of map area + * @prot: page protection flags for this mapping + * + * Note: this is only safe if the mm semaphore is held when called. + */ +int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr, + unsigned long pfn, unsigned long size, pgprot_t prot) +{ + pgd_t *pgd; + unsigned long next; + unsigned long end = addr + PAGE_ALIGN(size); + struct mm_struct *mm = vma->vm_mm; + int err; + + /* + * Physically remapped pages are special. Tell the + * rest of the world about it: + * VM_IO tells people not to look at these pages + * (accesses can have side effects). + * VM_RESERVED is specified all over the place, because + * in 2.4 it kept swapout's vma scan off this vma; but + * in 2.6 the LRU scan won't even find its pages, so this + * flag means no more than count its pages in reserved_vm, + * and omit it from core dump, even when VM_IO turned off. + * VM_PFNMAP tells the core MM that the base pages are just + * raw PFN mappings, and do not have a "struct page" associated + * with them. + * + * There's a horrible special case to handle copy-on-write + * behaviour that some programs depend on. We mark the "original" + * un-COW'ed pages by matching them up with "vma->vm_pgoff". 
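
The caller's-eye view of remap_pfn_range() is typically a character
driver's ->mmap handler mapping a driver-owned buffer into the calling
process. A sketch only, building in-tree rather than standalone;
"mydev_buf_phys" (the physical base of some driver buffer) and the
surrounding driver are assumptions, and error handling is elided:

#include <linux/fs.h>
#include <linux/mm.h>

/* Physical base of a buffer the (hypothetical) driver owns. */
static unsigned long mydev_buf_phys;

static int mydev_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long size = vma->vm_end - vma->vm_start;

	/* remap_pfn_range() itself marks the vma VM_IO | VM_RESERVED |
	 * VM_PFNMAP, as the comment above explains. */
	return remap_pfn_range(vma, vma->vm_start,
			       mydev_buf_phys >> PAGE_SHIFT,
			       size, vma->vm_page_prot);
}
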
+ */ + if (addr == vma->vm_start && end == vma->vm_end) { + vma->vm_pgoff = pfn; + vma->vm_flags |= VM_PFN_AT_MMAP; + } else if (is_cow_mapping(vma->vm_flags)) + return -EINVAL; + + vma->vm_flags |= VM_IO | VM_RESERVED | VM_PFNMAP; + + err = track_pfn_vma_new(vma, &prot, pfn, PAGE_ALIGN(size)); + if (err) { + /* + * To indicate that track_pfn related cleanup is not + * needed from higher level routine calling unmap_vmas + */ + vma->vm_flags &= ~(VM_IO | VM_RESERVED | VM_PFNMAP); + vma->vm_flags &= ~VM_PFN_AT_MMAP; + return -EINVAL; + } + + BUG_ON(addr >= end); + pfn -= addr >> PAGE_SHIFT; + pgd = pgd_offset(mm, addr); + flush_cache_range(vma, addr, end); + do { + next = pgd_addr_end(addr, end); + err = remap_pud_range(mm, pgd, addr, next, + pfn + (addr >> PAGE_SHIFT), prot); + if (err) + break; + } while (pgd++, addr = next, addr != end); + + if (err) + untrack_pfn_vma(vma, pfn, PAGE_ALIGN(size)); + + return err; +} +EXPORT_SYMBOL(remap_pfn_range); + +static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd, + unsigned long addr, unsigned long end, + pte_fn_t fn, void *data) +{ + pte_t *pte; + int err; + pgtable_t token; + spinlock_t *uninitialized_var(ptl); + + pte = (mm == &init_mm) ? + pte_alloc_kernel(pmd, addr) : + pte_alloc_map_lock(mm, pmd, addr, &ptl); + if (!pte) + return -ENOMEM; + + BUG_ON(pmd_huge(*pmd)); + + arch_enter_lazy_mmu_mode(); + + token = pmd_pgtable(*pmd); + + do { + err = fn(pte++, token, addr, data); + if (err) + break; + } while (addr += PAGE_SIZE, addr != end); + + arch_leave_lazy_mmu_mode(); + + if (mm != &init_mm) + pte_unmap_unlock(pte-1, ptl); + return err; +} + +static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud, + unsigned long addr, unsigned long end, + pte_fn_t fn, void *data) +{ + pmd_t *pmd; + unsigned long next; + int err; + + BUG_ON(pud_huge(*pud)); + + pmd = pmd_alloc(mm, pud, addr); + if (!pmd) + return -ENOMEM; + do { + next = pmd_addr_end(addr, end); + err = apply_to_pte_range(mm, pmd, addr, next, fn, data); + if (err) + break; + } while (pmd++, addr = next, addr != end); + return err; +} + +static int apply_to_pud_range(struct mm_struct *mm, pgd_t *pgd, + unsigned long addr, unsigned long end, + pte_fn_t fn, void *data) +{ + pud_t *pud; + unsigned long next; + int err; + + pud = pud_alloc(mm, pgd, addr); + if (!pud) + return -ENOMEM; + do { + next = pud_addr_end(addr, end); + err = apply_to_pmd_range(mm, pud, addr, next, fn, data); + if (err) + break; + } while (pud++, addr = next, addr != end); + return err; +} + +/* + * Scan a region of virtual memory, filling in page tables as necessary + * and calling a provided function on each leaf page table. + */ +int apply_to_page_range(struct mm_struct *mm, unsigned long addr, + unsigned long size, pte_fn_t fn, void *data) +{ + pgd_t *pgd; + unsigned long next; + unsigned long start = addr, end = addr + size; + int err; + + BUG_ON(addr >= end); + mmu_notifier_invalidate_range_start(mm, start, end); + pgd = pgd_offset(mm, addr); + do { + next = pgd_addr_end(addr, end); + err = apply_to_pud_range(mm, pgd, addr, next, fn, data); + if (err) + break; + } while (pgd++, addr = next, addr != end); + mmu_notifier_invalidate_range_end(mm, start, end); + return err; +} +EXPORT_SYMBOL_GPL(apply_to_page_range); + +/* + * handle_pte_fault chooses page fault handler according to an entry + * which was read non-atomically. Before making any commitment, on + * those architectures or configurations (e.g. 
i386 with PAE) which + * might give a mix of unmatched parts, do_swap_page and do_file_page + * must check under lock before unmapping the pte and proceeding + * (but do_wp_page is only called after already making such a check; + * and do_anonymous_page and do_no_page can safely check later on). + */ +static inline int pte_unmap_same(struct mm_struct *mm, pmd_t *pmd, + pte_t *page_table, pte_t orig_pte) +{ + int same = 1; +#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT) + if (sizeof(pte_t) > sizeof(unsigned long)) { + spinlock_t *ptl = pte_lockptr(mm, pmd); + spin_lock(ptl); + same = pte_same(*page_table, orig_pte); + spin_unlock(ptl); + } +#endif + pte_unmap(page_table); + return same; +} + +/* + * Do pte_mkwrite, but only if the vma says VM_WRITE. We do this when + * servicing faults for write access. In the normal case, do always want + * pte_mkwrite. But get_user_pages can cause write faults for mappings + * that do not have writing enabled, when used by access_process_vm. + */ +static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma) +{ + if (likely(vma->vm_flags & VM_WRITE)) + pte = pte_mkwrite(pte); + return pte; +} + +static inline void cow_user_page(struct page *dst, struct page *src, unsigned long va, struct vm_area_struct *vma) +{ + /* + * If the source page was a PFN mapping, we don't have + * a "struct page" for it. We do a best-effort copy by + * just copying from the original user address. If that + * fails, we just zero-fill it. Live with it. + */ + if (unlikely(!src)) { + void *kaddr = kmap_atomic(dst, KM_USER0); + void __user *uaddr = (void __user *)(va & PAGE_MASK); + + /* + * This really shouldn't fail, because the page is there + * in the page tables. But it might just be unreadable, + * in which case we just give up and fill the result with + * zeroes. + */ + if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE)) + memset(kaddr, 0, PAGE_SIZE); + kunmap_atomic(kaddr, KM_USER0); + flush_dcache_page(dst); + } else + copy_user_highpage(dst, src, va, vma); +} + +/* + * This routine handles present pages, when users try to write + * to a shared page. It is done by copying the page to a new address + * and decrementing the shared-page counter for the old page. + * + * Note that this routine assumes that the protection checks have been + * done by the caller (the low-level page fault routine in most cases). + * Thus we can safely just mark it writable once we've done any necessary + * COW. + * + * We also mark the page dirty at this point even though the page will + * change only once the write actually happens. This avoids a few races, + * and potentially makes it more efficient. + * + * We enter with non-exclusive mmap_sem (to exclude vma changes, + * but allow concurrent faults), with pte both mapped and locked. + * We return with mmap_sem still held, but pte unmapped and unlocked. + */ +static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma, + unsigned long address, pte_t *page_table, pmd_t *pmd, + spinlock_t *ptl, pte_t orig_pte) +{ + struct page *old_page, *new_page; + pte_t entry; + int reuse = 0, ret = 0; + int page_mkwrite = 0; + struct page *dirty_page = NULL; + + old_page = vm_normal_page(vma, address, orig_pte); + if (!old_page) { + /* + * VM_MIXEDMAP !pfn_valid() case + * + * We should not cow pages in a shared writeable mapping. + * Just mark the pages writable as we can't do any dirty + * accounting on raw pfn maps. 
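The common case do_wp_page() deals with is the copy-on-write fault after fork(). Its user-visible behaviour can be reproduced with a small userspace program (a sketch, error handling trimmed):

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	strcpy(p, "parent");
	if (fork() == 0) {
		strcpy(p, "child");	/* write fault: do_wp_page() copies */
		printf("child sees:  %s\n", p);
		_exit(0);
	}
	wait(NULL);
	printf("parent sees: %s\n", p);	/* still "parent" */
	return 0;
}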
+ */ + if ((vma->vm_flags & (VM_WRITE|VM_SHARED)) == + (VM_WRITE|VM_SHARED)) + goto reuse; + goto gotten; + } + + /* + * Take out anonymous pages first, anonymous shared vmas are + * not dirty accountable. + */ + if (PageAnon(old_page) && !PageKsm(old_page)) { + if (!trylock_page(old_page)) { + page_cache_get(old_page); + pte_unmap_unlock(page_table, ptl); + lock_page(old_page); + page_table = pte_offset_map_lock(mm, pmd, address, + &ptl); + if (!pte_same(*page_table, orig_pte)) { + unlock_page(old_page); + page_cache_release(old_page); + goto unlock; + } + page_cache_release(old_page); + } + reuse = reuse_swap_page(old_page); + unlock_page(old_page); + } else if (unlikely((vma->vm_flags & (VM_WRITE|VM_SHARED)) == + (VM_WRITE|VM_SHARED))) { + /* + * Only catch write-faults on shared writable pages, + * read-only shared pages can get COWed by + * get_user_pages(.write=1, .force=1). + */ + if (vma->vm_ops && vma->vm_ops->page_mkwrite) { + struct vm_fault vmf; + int tmp; + + vmf.virtual_address = (void __user *)(address & + PAGE_MASK); + vmf.pgoff = old_page->index; + vmf.flags = FAULT_FLAG_WRITE|FAULT_FLAG_MKWRITE; + vmf.page = old_page; + + /* + * Notify the address space that the page is about to + * become writable so that it can prohibit this or wait + * for the page to get into an appropriate state. + * + * We do this without the lock held, so that it can + * sleep if it needs to. + */ + page_cache_get(old_page); + pte_unmap_unlock(page_table, ptl); + + tmp = vma->vm_ops->page_mkwrite(vma, &vmf); + if (unlikely(tmp & + (VM_FAULT_ERROR | VM_FAULT_NOPAGE))) { + ret = tmp; + goto unwritable_page; + } + if (unlikely(!(tmp & VM_FAULT_LOCKED))) { + lock_page(old_page); + if (!old_page->mapping) { + ret = 0; /* retry the fault */ + unlock_page(old_page); + goto unwritable_page; + } + } else + VM_BUG_ON(!PageLocked(old_page)); + + /* + * Since we dropped the lock we need to revalidate + * the PTE as someone else may have changed it. If + * they did, we just return, as we can count on the + * MMU to tell us if they didn't also make it writable. + */ + page_table = pte_offset_map_lock(mm, pmd, address, + &ptl); + if (!pte_same(*page_table, orig_pte)) { + unlock_page(old_page); + page_cache_release(old_page); + goto unlock; + } + + page_mkwrite = 1; + } + dirty_page = old_page; + get_page(dirty_page); + reuse = 1; + } + + if (reuse) { +reuse: + flush_cache_page(vma, address, pte_pfn(orig_pte)); + entry = pte_mkyoung(orig_pte); + entry = maybe_mkwrite(pte_mkdirty(entry), vma); + if (ptep_set_access_flags(vma, address, page_table, entry,1)) + update_mmu_cache(vma, address, entry); + ret |= VM_FAULT_WRITE; + goto unlock; + } + + /* + * Ok, we need to copy. Oh, well.. + */ + page_cache_get(old_page); +gotten: + pte_unmap_unlock(page_table, ptl); + + if (unlikely(anon_vma_prepare(vma))) + goto oom; + + if (is_zero_pfn(pte_pfn(orig_pte))) { + new_page = alloc_zeroed_user_highpage_movable(vma, address); + if (!new_page) + goto oom; + } else { + new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address); + if (!new_page) + goto oom; + cow_user_page(new_page, old_page, address, vma); + } + __SetPageUptodate(new_page); + + /* + * Don't let another task, with possibly unlocked vma, + * keep the mlocked page. 
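For the ->page_mkwrite protocol used above: a handler is expected either to return with the page locked and VM_FAULT_LOCKED set, or to return 0 with the page unlocked, in which case do_wp_page() re-locks the page and re-checks page->mapping itself. A sketch of a conforming handler for a hypothetical filesystem (the filesystem-specific block reservation is elided):

static int myfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *page = vmf->page;

	lock_page(page);
	if (!page->mapping) {
		/* raced with truncate; the caller retries the fault */
		unlock_page(page);
		return 0;
	}
	wait_on_page_writeback(page);
	/* ...reserve blocks / mark the page write-safe here... */
	return VM_FAULT_LOCKED;	/* page returned locked */
}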
*/ + if ((vma->vm_flags & VM_LOCKED) && old_page) { + lock_page(old_page); /* for LRU manipulation */ + clear_page_mlock(old_page); + unlock_page(old_page); + } + + if (mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL)) + goto oom_free_new; + + /* + * Re-check the pte - we dropped the lock + */ + page_table = pte_offset_map_lock(mm, pmd, address, &ptl); + if (likely(pte_same(*page_table, orig_pte))) { + if (old_page) { + if (!PageAnon(old_page)) { + dec_mm_counter(mm, file_rss); + inc_mm_counter(mm, anon_rss); + } + } else + inc_mm_counter(mm, anon_rss); + flush_cache_page(vma, address, pte_pfn(orig_pte)); + entry = mk_pte(new_page, vma->vm_page_prot); + entry = maybe_mkwrite(pte_mkdirty(entry), vma); + /* + * Clear the pte entry and flush it first, before updating the + * pte with the new entry. This will avoid a race condition + * seen in the presence of one thread doing SMC and another + * thread doing COW. + */ + ptep_clear_flush(vma, address, page_table); + page_add_new_anon_rmap(new_page, vma, address); + /* + * We call the notify macro here because, when using secondary + * mmu page tables (such as kvm shadow page tables), we want the + * new page to be mapped directly into the secondary page table. + */ + set_pte_at_notify(mm, address, page_table, entry); + update_mmu_cache(vma, address, entry); + if (old_page) { + /* + * Only after switching the pte to the new page may + * we remove the mapcount here. Otherwise another + * process may come and find the rmap count decremented + * before the pte is switched to the new page, and + * "reuse" the old page writing into it while our pte + * here still points into it and can be read by other + * threads. + * + * The critical issue is to order this + * page_remove_rmap with the ptep_clear_flush above. + * Those stores are ordered by (if nothing else,) + * the barrier present in the atomic_add_negative + * in page_remove_rmap. + * + * Then the TLB flush in ptep_clear_flush ensures that + * no process can access the old page before the + * decremented mapcount is visible. And the old page + * cannot be reused until after the decremented + * mapcount is visible. So transitively, TLBs to + * old page will be flushed before it can be reused. + */ + page_remove_rmap(old_page); + } + + /* Free the old page.. */ + new_page = old_page; + ret |= VM_FAULT_WRITE; + } else + mem_cgroup_uncharge_page(new_page); + + if (new_page) + page_cache_release(new_page); + if (old_page) + page_cache_release(old_page); +unlock: + pte_unmap_unlock(page_table, ptl); + if (dirty_page) { + /* + * Yes, Virginia, this is actually required to prevent a race + * with clear_page_dirty_for_io() from clearing the page dirty + * bit after it clears all dirty ptes, but before a racing + * do_wp_page installs a dirty pte. + * + * do_no_page is protected similarly.
+ */ + if (!page_mkwrite) { + wait_on_page_locked(dirty_page); + set_page_dirty_balance(dirty_page, page_mkwrite); + } + put_page(dirty_page); + if (page_mkwrite) { + struct address_space *mapping = dirty_page->mapping; + + set_page_dirty(dirty_page); + unlock_page(dirty_page); + page_cache_release(dirty_page); + if (mapping) { + /* + * Some device drivers do not set page.mapping + * but still dirty their pages + */ + balance_dirty_pages_ratelimited(mapping); + } + } + + /* file_update_time outside page_lock */ + if (vma->vm_file) + file_update_time(vma->vm_file); + } + return ret; +oom_free_new: + page_cache_release(new_page); +oom: + if (old_page) { + if (page_mkwrite) { + unlock_page(old_page); + page_cache_release(old_page); + } + page_cache_release(old_page); + } + return VM_FAULT_OOM; + +unwritable_page: + page_cache_release(old_page); + return ret; +} + +/* + * Helper functions for unmap_mapping_range(). + * + * __ Notes on dropping i_mmap_lock to reduce latency while unmapping __ + * + * We have to restart searching the prio_tree whenever we drop the lock, + * since the iterator is only valid while the lock is held, and anyway + * a later vma might be split and reinserted earlier while lock dropped. + * + * The list of nonlinear vmas could be handled more efficiently, using + * a placeholder, but handle it in the same way until a need is shown. + * It is important to search the prio_tree before nonlinear list: a vma + * may become nonlinear and be shifted from prio_tree to nonlinear list + * while the lock is dropped; but never shifted from list to prio_tree. + * + * In order to make forward progress despite restarting the search, + * vm_truncate_count is used to mark a vma as now dealt with, so we can + * quickly skip it next time around. Since the prio_tree search only + * shows us those vmas affected by unmapping the range in question, we + * can't efficiently keep all vmas in step with mapping->truncate_count: + * so instead reset them all whenever it wraps back to 0 (then go to 1). + * mapping->truncate_count and vma->vm_truncate_count are protected by + * i_mmap_lock. + * + * In order to make forward progress despite repeatedly restarting some + * large vma, note the restart_addr from unmap_vmas when it breaks out: + * and restart from that address when we reach that vma again. It might + * have been split or merged, shrunk or extended, but never shifted: so + * restart_addr remains valid so long as it remains in the vma's range. + * unmap_mapping_range forces truncate_count to leap over page-aligned + * values so we can save vma's restart_addr in its truncate_count field. + */ +#define is_restart_addr(truncate_count) (!((truncate_count) & ~PAGE_MASK)) + +static void reset_vma_truncate_counts(struct address_space *mapping) +{ + struct vm_area_struct *vma; + struct prio_tree_iter iter; + + vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, 0, ULONG_MAX) + vma->vm_truncate_count = 0; + list_for_each_entry(vma, &mapping->i_mmap_nonlinear, shared.vm_set.list) + vma->vm_truncate_count = 0; +} + +static int unmap_mapping_range_vma(struct vm_area_struct *vma, + unsigned long start_addr, unsigned long end_addr, + struct zap_details *details) +{ + unsigned long restart_addr; + int need_break; + + /* + * files that support invalidating or truncating portions of the + * file from under mmaped areas must have their ->fault function + * return a locked page (and set VM_FAULT_LOCKED in the return). + * This provides synchronisation against concurrent unmapping here. 
+ */ + +again: + restart_addr = vma->vm_truncate_count; + if (is_restart_addr(restart_addr) && start_addr < restart_addr) { + start_addr = restart_addr; + if (start_addr >= end_addr) { + /* Top of vma has been split off since last time */ + vma->vm_truncate_count = details->truncate_count; + return 0; + } + } + + restart_addr = zap_page_range(vma, start_addr, + end_addr - start_addr, details); + need_break = need_resched() || spin_needbreak(details->i_mmap_lock); + + if (restart_addr >= end_addr) { + /* We have now completed this vma: mark it so */ + vma->vm_truncate_count = details->truncate_count; + if (!need_break) + return 0; + } else { + /* Note restart_addr in vma's truncate_count field */ + vma->vm_truncate_count = restart_addr; + if (!need_break) + goto again; + } + + spin_unlock(details->i_mmap_lock); + cond_resched(); + spin_lock(details->i_mmap_lock); + return -EINTR; +} + +static inline void unmap_mapping_range_tree(struct prio_tree_root *root, + struct zap_details *details) +{ + struct vm_area_struct *vma; + struct prio_tree_iter iter; + pgoff_t vba, vea, zba, zea; + +restart: + vma_prio_tree_foreach(vma, &iter, root, + details->first_index, details->last_index) { + /* Skip quickly over those we have already dealt with */ + if (vma->vm_truncate_count == details->truncate_count) + continue; + + vba = vma->vm_pgoff; + vea = vba + ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) - 1; + /* Assume for now that PAGE_CACHE_SHIFT == PAGE_SHIFT */ + zba = details->first_index; + if (zba < vba) + zba = vba; + zea = details->last_index; + if (zea > vea) + zea = vea; + + if (unmap_mapping_range_vma(vma, + ((zba - vba) << PAGE_SHIFT) + vma->vm_start, + ((zea - vba + 1) << PAGE_SHIFT) + vma->vm_start, + details) < 0) + goto restart; + } +} + +static inline void unmap_mapping_range_list(struct list_head *head, + struct zap_details *details) +{ + struct vm_area_struct *vma; + + /* + * In nonlinear VMAs there is no correspondence between virtual address + * offset and file offset. So we must perform an exhaustive search + * across *all* the pages in each nonlinear VMA, not just the pages + * whose virtual address lies outside the file truncation point. + */ +restart: + list_for_each_entry(vma, head, shared.vm_set.list) { + /* Skip quickly over those we have already dealt with */ + if (vma->vm_truncate_count == details->truncate_count) + continue; + details->nonlinear_vma = vma; + if (unmap_mapping_range_vma(vma, vma->vm_start, + vma->vm_end, details) < 0) + goto restart; + } +} + +/** + * unmap_mapping_range - unmap the portion of all mmaps in the specified address_space corresponding to the specified page range in the underlying file. + * @mapping: the address space containing mmaps to be unmapped. + * @holebegin: byte in first page to unmap, relative to the start of + * the underlying file. This will be rounded down to a PAGE_SIZE + * boundary. Note that this is different from truncate_pagecache(), which + * must keep the partial page. In contrast, we must get rid of + * partial pages. + * @holelen: size of prospective hole in bytes. This will be rounded + * up to a PAGE_SIZE boundary. A holelen of zero truncates to the + * end of the file. + * @even_cows: 1 when truncating a file, unmap even private COWed pages; + * but 0 when invalidating pagecache, don't throw away private data. 
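The effect of unmap_mapping_range() is easiest to see through truncate: once the range is unmapped and the pagecache truncated, touching the old mapping raises SIGBUS. A userspace sketch (minimal error handling, scratch file under /tmp):

#include <fcntl.h>
#include <signal.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <unistd.h>

static void on_sigbus(int sig)
{
	write(1, "SIGBUS after truncate\n", 22);
	_exit(0);
}

int main(void)
{
	int fd = open("/tmp/trunc-demo", O_RDWR | O_CREAT | O_TRUNC, 0600);
	char *p;

	ftruncate(fd, 2 * 4096);
	p = mmap(NULL, 2 * 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	p[4096] = 1;			/* fine, page 1 is inside the file */
	signal(SIGBUS, on_sigbus);
	ftruncate(fd, 4096);		/* unmaps page 1 from every mapping */
	p[4096] = 2;			/* raises SIGBUS */
	return 1;
}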
+ */ +void unmap_mapping_range(struct address_space *mapping, + loff_t const holebegin, loff_t const holelen, int even_cows) +{ + struct zap_details details; + pgoff_t hba = holebegin >> PAGE_SHIFT; + pgoff_t hlen = (holelen + PAGE_SIZE - 1) >> PAGE_SHIFT; + + /* Check for overflow. */ + if (sizeof(holelen) > sizeof(hlen)) { + long long holeend = + (holebegin + holelen + PAGE_SIZE - 1) >> PAGE_SHIFT; + if (holeend & ~(long long)ULONG_MAX) + hlen = ULONG_MAX - hba + 1; + } + + details.check_mapping = even_cows? NULL: mapping; + details.nonlinear_vma = NULL; + details.first_index = hba; + details.last_index = hba + hlen - 1; + if (details.last_index < details.first_index) + details.last_index = ULONG_MAX; + details.i_mmap_lock = &mapping->i_mmap_lock; + + mutex_lock(&mapping->unmap_mutex); + spin_lock(&mapping->i_mmap_lock); + + /* Protect against endless unmapping loops */ + mapping->truncate_count++; + if (unlikely(is_restart_addr(mapping->truncate_count))) { + if (mapping->truncate_count == 0) + reset_vma_truncate_counts(mapping); + mapping->truncate_count++; + } + details.truncate_count = mapping->truncate_count; + + if (unlikely(!prio_tree_empty(&mapping->i_mmap))) + unmap_mapping_range_tree(&mapping->i_mmap, &details); + if (unlikely(!list_empty(&mapping->i_mmap_nonlinear))) + unmap_mapping_range_list(&mapping->i_mmap_nonlinear, &details); + spin_unlock(&mapping->i_mmap_lock); + mutex_unlock(&mapping->unmap_mutex); +} +EXPORT_SYMBOL(unmap_mapping_range); + +int vmtruncate_range(struct inode *inode, loff_t offset, loff_t end) +{ + struct address_space *mapping = inode->i_mapping; + + /* + * If the underlying filesystem is not going to provide + * a way to truncate a range of blocks (punch a hole) - + * we should return failure right now. + */ + if (!inode->i_op->truncate_range) + return -ENOSYS; + + mutex_lock(&inode->i_mutex); + down_write(&inode->i_alloc_sem); + unmap_mapping_range(mapping, offset, (end - offset), 1); + truncate_inode_pages_range(mapping, offset, end); + unmap_mapping_range(mapping, offset, (end - offset), 1); + inode->i_op->truncate_range(inode, offset, end); + up_write(&inode->i_alloc_sem); + mutex_unlock(&inode->i_mutex); + + return 0; +} + +/* + * We enter with non-exclusive mmap_sem (to exclude vma changes, + * but allow concurrent faults), and pte mapped but not yet locked. + * We return with mmap_sem still held, but pte unmapped and unlocked. + */ +static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma, + unsigned long address, pte_t *page_table, pmd_t *pmd, + unsigned int flags, pte_t orig_pte) +{ + spinlock_t *ptl; + struct page *page; + swp_entry_t entry; + pte_t pte; + struct mem_cgroup *ptr = NULL; + int ret = 0; + + if (!pte_unmap_same(mm, pmd, page_table, orig_pte)) + goto out; + + entry = pte_to_swp_entry(orig_pte); + if (unlikely(non_swap_entry(entry))) { + if (is_migration_entry(entry)) { + migration_entry_wait(mm, pmd, address); + } else if (is_hwpoison_entry(entry)) { + ret = VM_FAULT_HWPOISON; + } else { + print_bad_pte(vma, address, orig_pte, NULL); + ret = VM_FAULT_SIGBUS; + } + goto out; + } + delayacct_set_flag(DELAYACCT_PF_SWAPIN); + page = lookup_swap_cache(entry); + if (!page) { + grab_swap_token(mm); /* Contend for token _before_ read-in */ + page = swapin_readahead(entry, + GFP_HIGHUSER_MOVABLE, vma, address); + if (!page) { + /* + * Back out if somebody else faulted in this pte + * while we released the pte lock. 
+ */ + page_table = pte_offset_map_lock(mm, pmd, address, &ptl); + if (likely(pte_same(*page_table, orig_pte))) + ret = VM_FAULT_OOM; + delayacct_clear_flag(DELAYACCT_PF_SWAPIN); + goto unlock; + } + + /* Had to read the page from swap area: Major fault */ + ret = VM_FAULT_MAJOR; + count_vm_event(PGMAJFAULT); + } else if (PageHWPoison(page)) { + ret = VM_FAULT_HWPOISON; + delayacct_clear_flag(DELAYACCT_PF_SWAPIN); + goto out_release; + } + + lock_page(page); + delayacct_clear_flag(DELAYACCT_PF_SWAPIN); + + if (mem_cgroup_try_charge_swapin(mm, page, GFP_KERNEL, &ptr)) { + ret = VM_FAULT_OOM; + goto out_page; + } + + /* + * Back out if somebody else already faulted in this pte. + */ + page_table = pte_offset_map_lock(mm, pmd, address, &ptl); + if (unlikely(!pte_same(*page_table, orig_pte))) + goto out_nomap; + + if (unlikely(!PageUptodate(page))) { + ret = VM_FAULT_SIGBUS; + goto out_nomap; + } + + /* + * The page isn't present yet, go ahead with the fault. + * + * Be careful about the sequence of operations here. + * To get its accounting right, reuse_swap_page() must be called + * while the page is counted on swap but not yet in mapcount i.e. + * before page_add_anon_rmap() and swap_free(); try_to_free_swap() + * must be called after the swap_free(), or it will never succeed. + * Because delete_from_swap_page() may be called by reuse_swap_page(), + * mem_cgroup_commit_charge_swapin() may not be able to find swp_entry + * in page->private. In this case, a record in swap_cgroup is silently + * discarded at swap_free(). + */ + + inc_mm_counter(mm, anon_rss); + pte = mk_pte(page, vma->vm_page_prot); + if ((flags & FAULT_FLAG_WRITE) && reuse_swap_page(page)) { + pte = maybe_mkwrite(pte_mkdirty(pte), vma); + flags &= ~FAULT_FLAG_WRITE; + } + flush_icache_page(vma, page); + set_pte_at(mm, address, page_table, pte); + page_add_anon_rmap(page, vma, address); + /* It's better to call commit-charge after rmap is established */ + mem_cgroup_commit_charge_swapin(page, ptr); + + swap_free(entry); + if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page)) + try_to_free_swap(page); + unlock_page(page); + + if (flags & FAULT_FLAG_WRITE) { + ret |= do_wp_page(mm, vma, address, page_table, pmd, ptl, pte); + if (ret & VM_FAULT_ERROR) + ret &= VM_FAULT_ERROR; + goto out; + } + + /* No need to invalidate - it was non-present before */ + update_mmu_cache(vma, address, pte); +unlock: + pte_unmap_unlock(page_table, ptl); +out: + return ret; +out_nomap: + mem_cgroup_cancel_charge_swapin(ptr); + pte_unmap_unlock(page_table, ptl); +out_page: + unlock_page(page); +out_release: + page_cache_release(page); + return ret; +} + +/* + * This is like a special single-page "expand_{down|up}wards()", + * except we must first make sure that 'address{-|+}PAGE_SIZE' + * doesn't hit another vma. + */ +static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address) +{ + address &= PAGE_MASK; + if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) { + struct vm_area_struct *prev = vma->vm_prev; + + /* + * Is there a mapping abutting this one below? + * + * That's only ok if it's the same stack mapping + * that has gotten split.. + */ + if (prev && prev->vm_end == address) + return prev->vm_flags & VM_GROWSDOWN ? 
0 : -ENOMEM; + + expand_stack(vma, address - PAGE_SIZE); + } + if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) { + struct vm_area_struct *next = vma->vm_next; + + /* As VM_GROWSDOWN but s/below/above/ */ + if (next && next->vm_start == address + PAGE_SIZE) + return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM; + + expand_upwards(vma, address + PAGE_SIZE); + } + return 0; +} + +/* + * We enter with non-exclusive mmap_sem (to exclude vma changes, + * but allow concurrent faults), and pte mapped but not yet locked. + * We return with mmap_sem still held, but pte unmapped and unlocked. + */ +static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma, + unsigned long address, pte_t *page_table, pmd_t *pmd, + unsigned int flags) +{ + struct page *page; + spinlock_t *ptl; + pte_t entry; + + pte_unmap(page_table); + + /* Check if we need to add a guard page to the stack */ + if (check_stack_guard_page(vma, address) < 0) + return VM_FAULT_SIGBUS; + + /* Use the zero-page for reads */ + if (!(flags & FAULT_FLAG_WRITE)) { + entry = pte_mkspecial(pfn_pte(my_zero_pfn(address), + vma->vm_page_prot)); + page_table = pte_offset_map_lock(mm, pmd, address, &ptl); + if (!pte_none(*page_table)) + goto unlock; + goto setpte; + } + + /* Allocate our own private page. */ + if (unlikely(anon_vma_prepare(vma))) + goto oom; + page = alloc_zeroed_user_highpage_movable(vma, address); + if (!page) + goto oom; + __SetPageUptodate(page); + + if (mem_cgroup_newpage_charge(page, mm, GFP_KERNEL)) + goto oom_free_page; + + entry = mk_pte(page, vma->vm_page_prot); + if (vma->vm_flags & VM_WRITE) + entry = pte_mkwrite(pte_mkdirty(entry)); + + page_table = pte_offset_map_lock(mm, pmd, address, &ptl); + if (!pte_none(*page_table)) + goto release; + + inc_mm_counter(mm, anon_rss); + page_add_new_anon_rmap(page, vma, address); +setpte: + set_pte_at(mm, address, page_table, entry); + + /* No need to invalidate - it was non-present before */ + update_mmu_cache(vma, address, entry); +unlock: + pte_unmap_unlock(page_table, ptl); + return 0; +release: + mem_cgroup_uncharge_page(page); + page_cache_release(page); + goto unlock; +oom_free_page: + page_cache_release(page); +oom: + return VM_FAULT_OOM; +} + +/* + * __do_fault() tries to create a new page mapping. It aggressively + * tries to share with existing pages, but makes a separate copy if + * the FAULT_FLAG_WRITE is set in the flags parameter in order to avoid + * the next page fault. + * + * As this is called only for pages that do not currently exist, we + * do not need to flush old virtual caches or the TLB. + * + * We enter with non-exclusive mmap_sem (to exclude vma changes, + * but allow concurrent faults), and pte neither mapped nor locked. + * We return with mmap_sem still held, but pte unmapped and unlocked. 
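The two halves of do_anonymous_page() above, the shared zero page for read faults and a private zeroed page on write, are visible from userspace (a sketch):

#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
	unsigned char *p = mmap(NULL, 1 << 20, PROT_READ | PROT_WRITE,
				MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	/* read fault: the zero page is mapped, RSS barely grows */
	printf("fresh page reads as %u\n", p[0]);
	/* write fault: a private zeroed page is allocated */
	p[0] = 42;
	printf("after write: %u\n", p[0]);
	return 0;
}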
+ */ +static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma, + unsigned long address, pmd_t *pmd, + pgoff_t pgoff, unsigned int flags, pte_t orig_pte) +{ + pte_t *page_table; + spinlock_t *ptl; + struct page *page; + pte_t entry; + int anon = 0; + int charged = 0; + struct page *dirty_page = NULL; + struct vm_fault vmf; + int ret; + int page_mkwrite = 0; + + vmf.virtual_address = (void __user *)(address & PAGE_MASK); + vmf.pgoff = pgoff; + vmf.flags = flags; + vmf.page = NULL; + + ret = vma->vm_ops->fault(vma, &vmf); + if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE))) + return ret; + + if (unlikely(PageHWPoison(vmf.page))) { + if (ret & VM_FAULT_LOCKED) + unlock_page(vmf.page); + return VM_FAULT_HWPOISON; + } + + /* + * For consistency in subsequent calls, make the faulted page always + * locked. + */ + if (unlikely(!(ret & VM_FAULT_LOCKED))) + lock_page(vmf.page); + else + VM_BUG_ON(!PageLocked(vmf.page)); + + /* + * Should we do an early C-O-W break? + */ + page = vmf.page; + if (flags & FAULT_FLAG_WRITE) { + if (!(vma->vm_flags & VM_SHARED)) { + anon = 1; + if (unlikely(anon_vma_prepare(vma))) { + ret = VM_FAULT_OOM; + goto out; + } + page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, + vma, address); + if (!page) { + ret = VM_FAULT_OOM; + goto out; + } + if (mem_cgroup_newpage_charge(page, mm, GFP_KERNEL)) { + ret = VM_FAULT_OOM; + page_cache_release(page); + goto out; + } + charged = 1; + /* + * Don't let another task, with possibly unlocked vma, + * keep the mlocked page. + */ + if (vma->vm_flags & VM_LOCKED) + clear_page_mlock(vmf.page); + copy_user_highpage(page, vmf.page, address, vma); + __SetPageUptodate(page); + } else { + /* + * If the page will be shareable, see if the backing + * address space wants to know that the page is about + * to become writable + */ + if (vma->vm_ops->page_mkwrite) { + int tmp; + + unlock_page(page); + vmf.flags = FAULT_FLAG_WRITE|FAULT_FLAG_MKWRITE; + tmp = vma->vm_ops->page_mkwrite(vma, &vmf); + if (unlikely(tmp & + (VM_FAULT_ERROR | VM_FAULT_NOPAGE))) { + ret = tmp; + goto unwritable_page; + } + if (unlikely(!(tmp & VM_FAULT_LOCKED))) { + lock_page(page); + if (!page->mapping) { + ret = 0; /* retry the fault */ + unlock_page(page); + goto unwritable_page; + } + } else + VM_BUG_ON(!PageLocked(page)); + page_mkwrite = 1; + } + } + + } + + page_table = pte_offset_map_lock(mm, pmd, address, &ptl); + + /* + * This silly early PAGE_DIRTY setting removes a race + * due to the bad i386 page protection. But it's valid + * for other architectures too. + * + * Note that if FAULT_FLAG_WRITE is set, we either now have + * an exclusive copy of the page, or this is a shared mapping, + * so we can make it writable and dirty to avoid having to + * handle that later. + */ + /* Only go through if we didn't race with anybody else... 
*/ + if (likely(pte_same(*page_table, orig_pte))) { + flush_icache_page(vma, page); + entry = mk_pte(page, vma->vm_page_prot); + if (flags & FAULT_FLAG_WRITE) + entry = maybe_mkwrite(pte_mkdirty(entry), vma); + if (anon) { + inc_mm_counter(mm, anon_rss); + page_add_new_anon_rmap(page, vma, address); + } else { + inc_mm_counter(mm, file_rss); + page_add_file_rmap(page); + if (flags & FAULT_FLAG_WRITE) { + dirty_page = page; + get_page(dirty_page); + } + } + set_pte_at(mm, address, page_table, entry); + + /* no need to invalidate: a not-present page won't be cached */ + update_mmu_cache(vma, address, entry); + } else { + if (charged) + mem_cgroup_uncharge_page(page); + if (anon) + page_cache_release(page); + else + anon = 1; /* no anon but release faulted_page */ + } + + pte_unmap_unlock(page_table, ptl); + +out: + if (dirty_page) { + struct address_space *mapping = page->mapping; + + if (set_page_dirty(dirty_page)) + page_mkwrite = 1; + unlock_page(dirty_page); + put_page(dirty_page); + if (page_mkwrite && mapping) { + /* + * Some device drivers do not set page.mapping but still + * dirty their pages + */ + balance_dirty_pages_ratelimited(mapping); + } + + /* file_update_time outside page_lock */ + if (vma->vm_file) + file_update_time(vma->vm_file); + } else { + unlock_page(vmf.page); + if (anon) + page_cache_release(vmf.page); + } + + return ret; + +unwritable_page: + page_cache_release(page); + return ret; +} + +static int do_linear_fault(struct mm_struct *mm, struct vm_area_struct *vma, + unsigned long address, pte_t *page_table, pmd_t *pmd, + unsigned int flags, pte_t orig_pte) +{ + pgoff_t pgoff = (((address & PAGE_MASK) + - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff; + + pte_unmap(page_table); + return __do_fault(mm, vma, address, pmd, pgoff, flags, orig_pte); +} + +/* + * Fault of a previously existing named mapping. Repopulate the pte + * from the encoded file_pte if possible. This enables swappable + * nonlinear vmas. + * + * We enter with non-exclusive mmap_sem (to exclude vma changes, + * but allow concurrent faults), and pte mapped but not yet locked. + * We return with mmap_sem still held, but pte unmapped and unlocked. + */ +static int do_nonlinear_fault(struct mm_struct *mm, struct vm_area_struct *vma, + unsigned long address, pte_t *page_table, pmd_t *pmd, + unsigned int flags, pte_t orig_pte) +{ + pgoff_t pgoff; + + flags |= FAULT_FLAG_NONLINEAR; + + if (!pte_unmap_same(mm, pmd, page_table, orig_pte)) + return 0; + + if (unlikely(!(vma->vm_flags & VM_NONLINEAR))) { + /* + * Page table corrupted: show pte and kill process. + */ + print_bad_pte(vma, address, orig_pte, NULL); + return VM_FAULT_SIGBUS; + } + + pgoff = pte_to_pgoff(orig_pte); + return __do_fault(mm, vma, address, pmd, pgoff, flags, orig_pte); +} + +/* + * These routines also need to handle stuff like marking pages dirty + * and/or accessed for architectures that don't do it in hardware (most + * RISC architectures). The early dirtying is also good on the i386. + * + * There is also a hook called "update_mmu_cache()" that architectures + * with external mmu caches can use to update those (ie the Sparc or + * PowerPC hashed page tables that act as extended TLBs). + * + * We enter with non-exclusive mmap_sem (to exclude vma changes, + * but allow concurrent faults), and pte mapped but not yet locked. + * We return with mmap_sem still held, but pte unmapped and unlocked. 
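do_nonlinear_fault() above services mappings rearranged with remap_file_pages(2), which shuffles file pages inside a single VM_SHARED vma (the prot argument must be 0). A userspace sketch:

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/tmp/nonlinear-demo", O_RDWR | O_CREAT | O_TRUNC, 0600);
	char *p;

	ftruncate(fd, 2 * 4096);
	p = mmap(NULL, 2 * 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	pwrite(fd, "AAAA", 4, 0);
	pwrite(fd, "BBBB", 4, 4096);
	/* map file page 1 at vma page 0: later faults are nonlinear */
	if (remap_file_pages(p, 4096, 0, 1, 0) == 0)
		printf("vma page 0 now shows file page 1: %.4s\n", p);
	return 0;
}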
+ */ +static inline int handle_pte_fault(struct mm_struct *mm, + struct vm_area_struct *vma, unsigned long address, + pte_t *pte, pmd_t *pmd, unsigned int flags) +{ + pte_t entry; + spinlock_t *ptl; + + entry = *pte; + if (!pte_present(entry)) { + if (pte_none(entry)) { + if (vma->vm_ops) { + if (likely(vma->vm_ops->fault)) + return do_linear_fault(mm, vma, address, + pte, pmd, flags, entry); + } + return do_anonymous_page(mm, vma, address, + pte, pmd, flags); + } + if (pte_file(entry)) + return do_nonlinear_fault(mm, vma, address, + pte, pmd, flags, entry); + return do_swap_page(mm, vma, address, + pte, pmd, flags, entry); + } + + ptl = pte_lockptr(mm, pmd); + spin_lock(ptl); + if (unlikely(!pte_same(*pte, entry))) + goto unlock; + if (flags & FAULT_FLAG_WRITE) { + if (!pte_write(entry)) + return do_wp_page(mm, vma, address, + pte, pmd, ptl, entry); + entry = pte_mkdirty(entry); + } + entry = pte_mkyoung(entry); + if (ptep_set_access_flags(vma, address, pte, entry, flags & FAULT_FLAG_WRITE)) { + update_mmu_cache(vma, address, entry); + } else { + /* + * This is needed only for protection faults but the arch code + * is not yet telling us if this is a protection fault or not. + * This still avoids useless tlb flushes for .text page faults + * with threads. + */ + if (flags & FAULT_FLAG_WRITE) + flush_tlb_page(vma, address); + } +unlock: + pte_unmap_unlock(pte, ptl); + return 0; +} + +/* + * By the time we get here, we already hold the mm semaphore + */ +int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma, + unsigned long address, unsigned int flags) +{ + pgd_t *pgd; + pud_t *pud; + pmd_t *pmd; + pte_t *pte; + + __set_current_state(TASK_RUNNING); + + count_vm_event(PGFAULT); + + if (unlikely(is_vm_hugetlb_page(vma))) + return hugetlb_fault(mm, vma, address, flags); + + pgd = pgd_offset(mm, address); + pud = pud_alloc(mm, pgd, address); + if (!pud) + return VM_FAULT_OOM; + pmd = pmd_alloc(mm, pud, address); + if (!pmd) + return VM_FAULT_OOM; + pte = pte_alloc_map(mm, pmd, address); + if (!pte) + return VM_FAULT_OOM; + + return handle_pte_fault(mm, vma, address, pte, pmd, flags); +} + +#ifndef __PAGETABLE_PUD_FOLDED +/* + * Allocate page upper directory. + * We've already handled the fast-path in-line. + */ +int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address) +{ + pud_t *new = pud_alloc_one(mm, address); + if (!new) + return -ENOMEM; + + smp_wmb(); /* See comment in __pte_alloc */ + + spin_lock(&mm->page_table_lock); + if (pgd_present(*pgd)) /* Another has populated it */ + pud_free(mm, new); + else + pgd_populate(mm, pgd, new); + spin_unlock(&mm->page_table_lock); + return 0; +} +#endif /* __PAGETABLE_PUD_FOLDED */ + +#ifndef __PAGETABLE_PMD_FOLDED +/* + * Allocate page middle directory. + * We've already handled the fast-path in-line. 
+ */ +int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address) +{ + pmd_t *new = pmd_alloc_one(mm, address); + if (!new) + return -ENOMEM; + + smp_wmb(); /* See comment in __pte_alloc */ + + spin_lock(&mm->page_table_lock); +#ifndef __ARCH_HAS_4LEVEL_HACK + if (pud_present(*pud)) /* Another has populated it */ + pmd_free(mm, new); + else + pud_populate(mm, pud, new); +#else + if (pgd_present(*pud)) /* Another has populated it */ + pmd_free(mm, new); + else + pgd_populate(mm, pud, new); +#endif /* __ARCH_HAS_4LEVEL_HACK */ + spin_unlock(&mm->page_table_lock); + return 0; +} +#endif /* __PAGETABLE_PMD_FOLDED */ + +int make_pages_present(unsigned long addr, unsigned long end) +{ + int ret, len, write; + struct vm_area_struct * vma; + + vma = find_vma(current->mm, addr); + if (!vma) + return -ENOMEM; + write = (vma->vm_flags & VM_WRITE) != 0; + BUG_ON(addr >= end); + BUG_ON(end > vma->vm_end); + len = DIV_ROUND_UP(end, PAGE_SIZE) - addr/PAGE_SIZE; + ret = get_user_pages(current, current->mm, addr, + len, write, 0, NULL, NULL); + if (ret < 0) + return ret; + return ret == len ? 0 : -EFAULT; +} + +#if !defined(__HAVE_ARCH_GATE_AREA) + +#if defined(AT_SYSINFO_EHDR) +static struct vm_area_struct gate_vma; + +static int __init gate_vma_init(void) +{ + gate_vma.vm_mm = NULL; + gate_vma.vm_start = FIXADDR_USER_START; + gate_vma.vm_end = FIXADDR_USER_END; + gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC; + gate_vma.vm_page_prot = __P101; + /* + * Make sure the vDSO gets into every core dump. + * Dumping its contents makes post-mortem fully interpretable later + * without matching up the same kernel and hardware config to see + * what PC values meant. + */ + gate_vma.vm_flags |= VM_ALWAYSDUMP; + return 0; +} +__initcall(gate_vma_init); +#endif + +struct vm_area_struct *get_gate_vma(struct task_struct *tsk) +{ +#ifdef AT_SYSINFO_EHDR + return &gate_vma; +#else + return NULL; +#endif +} + +int in_gate_area_no_task(unsigned long addr) +{ +#ifdef AT_SYSINFO_EHDR + if ((addr >= FIXADDR_USER_START) && (addr < FIXADDR_USER_END)) + return 1; +#endif + return 0; +} + +#endif /* __HAVE_ARCH_GATE_AREA */ + +static int follow_pte(struct mm_struct *mm, unsigned long address, + pte_t **ptepp, spinlock_t **ptlp) +{ + pgd_t *pgd; + pud_t *pud; + pmd_t *pmd; + pte_t *ptep; + + pgd = pgd_offset(mm, address); + if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd))) + goto out; + + pud = pud_offset(pgd, address); + if (pud_none(*pud) || unlikely(pud_bad(*pud))) + goto out; + + pmd = pmd_offset(pud, address); + if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd))) + goto out; + + /* We cannot handle huge page PFN maps. Luckily they don't exist. */ + if (pmd_huge(*pmd)) + goto out; + + ptep = pte_offset_map_lock(mm, pmd, address, ptlp); + if (!ptep) + goto out; + if (!pte_present(*ptep)) + goto unlock; + *ptepp = ptep; + return 0; +unlock: + pte_unmap_unlock(ptep, *ptlp); +out: + return -EINVAL; +} + +/** + * follow_pfn - look up PFN at a user virtual address + * @vma: memory mapping + * @address: user virtual address + * @pfn: location to store found PFN + * + * Only IO mappings and raw PFN mappings are allowed. + * + * Returns zero and the pfn at @pfn on success, -ve otherwise. 
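A sketch of a typical in-kernel caller of follow_pfn() (a hypothetical helper; KVM's gfn-to-pfn translation does something similar). The caller must hold mmap_sem for read across the lookup:

static int lookup_user_pfn(struct mm_struct *mm, unsigned long uaddr,
			   unsigned long *pfn)
{
	struct vm_area_struct *vma;
	int ret = -EFAULT;

	down_read(&mm->mmap_sem);
	vma = find_vma(mm, uaddr);
	if (vma && uaddr >= vma->vm_start)
		ret = follow_pfn(vma, uaddr, pfn);	/* 0 on success */
	up_read(&mm->mmap_sem);
	return ret;
}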
+ */ +int follow_pfn(struct vm_area_struct *vma, unsigned long address, + unsigned long *pfn) +{ + int ret = -EINVAL; + spinlock_t *ptl; + pte_t *ptep; + + if (!(vma->vm_flags & (VM_IO | VM_PFNMAP))) + return ret; + + ret = follow_pte(vma->vm_mm, address, &ptep, &ptl); + if (ret) + return ret; + *pfn = pte_pfn(*ptep); + pte_unmap_unlock(ptep, ptl); + return 0; +} +EXPORT_SYMBOL(follow_pfn); + +#ifdef CONFIG_HAVE_IOREMAP_PROT +int follow_phys(struct vm_area_struct *vma, + unsigned long address, unsigned int flags, + unsigned long *prot, resource_size_t *phys) +{ + int ret = -EINVAL; + pte_t *ptep, pte; + spinlock_t *ptl; + + if (!(vma->vm_flags & (VM_IO | VM_PFNMAP))) + goto out; + + if (follow_pte(vma->vm_mm, address, &ptep, &ptl)) + goto out; + pte = *ptep; + + if ((flags & FOLL_WRITE) && !pte_write(pte)) + goto unlock; + + *prot = pgprot_val(pte_pgprot(pte)); + *phys = (resource_size_t)pte_pfn(pte) << PAGE_SHIFT; + + ret = 0; +unlock: + pte_unmap_unlock(ptep, ptl); +out: + return ret; +} + +int generic_access_phys(struct vm_area_struct *vma, unsigned long addr, + void *buf, int len, int write) +{ + resource_size_t phys_addr; + unsigned long prot = 0; + void __iomem *maddr; + int offset = addr & (PAGE_SIZE-1); + + if (follow_phys(vma, addr, write, &prot, &phys_addr)) + return -EINVAL; + + maddr = ioremap_prot(phys_addr, PAGE_SIZE, prot); + if (write) + memcpy_toio(maddr + offset, buf, len); + else + memcpy_fromio(buf, maddr + offset, len); + iounmap(maddr); + + return len; +} +#endif + +/* + * Access another process' address space. + * Source/target buffer must be kernel space, + * Do not walk the page table directly, use get_user_pages + */ +int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write) +{ + struct mm_struct *mm; + struct vm_area_struct *vma; + void *old_buf = buf; + + mm = get_task_mm(tsk); + if (!mm) + return 0; + + down_read(&mm->mmap_sem); + /* ignore errors, just check how much was successfully transferred */ + while (len) { + int bytes, ret, offset; + void *maddr; + struct page *page = NULL; + + ret = get_user_pages(tsk, mm, addr, 1, + write, 1, &page, &vma); + if (ret <= 0) { + /* + * Check if this is a VM_IO | VM_PFNMAP VMA, which + * we can access using slightly different code. + */ +#ifdef CONFIG_HAVE_IOREMAP_PROT + vma = find_vma(mm, addr); + if (!vma) + break; + if (vma->vm_ops && vma->vm_ops->access) + ret = vma->vm_ops->access(vma, addr, buf, + len, write); + if (ret <= 0) +#endif + break; + bytes = ret; + } else { + bytes = len; + offset = addr & (PAGE_SIZE-1); + if (bytes > PAGE_SIZE-offset) + bytes = PAGE_SIZE-offset; + + maddr = kmap(page); + if (write) { + copy_to_user_page(vma, page, addr, + maddr + offset, buf, bytes); + set_page_dirty_lock(page); + } else { + copy_from_user_page(vma, page, addr, + buf, maddr + offset, bytes); + } + kunmap(page); + page_cache_release(page); + } + len -= bytes; + buf += bytes; + addr += bytes; + } + up_read(&mm->mmap_sem); + mmput(mm); + + return buf - old_buf; +} + +/* + * Print the name of a VMA. 
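access_process_vm() above is what ptrace(PTRACE_PEEKDATA) and reads of /proc/<pid>/mem bottom out in. A userspace sketch:

#include <signal.h>
#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/wait.h>
#include <unistd.h>

static long secret = 0x42;

int main(void)
{
	pid_t child = fork();

	if (child == 0) {
		secret = 0x1234;	/* child's private COW copy */
		ptrace(PTRACE_TRACEME, 0, NULL, NULL);
		raise(SIGSTOP);
		_exit(0);
	}
	waitpid(child, NULL, 0);
	printf("parent's copy %#lx, peeked from child %#lx\n", secret,
	       ptrace(PTRACE_PEEKDATA, child, &secret, NULL));
	kill(child, SIGKILL);
	waitpid(child, NULL, 0);
	return 0;
}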
*/ +void print_vma_addr(char *prefix, unsigned long ip) +{ + struct mm_struct *mm = current->mm; + struct vm_area_struct *vma; + + /* + * Do not print if we are in atomic + * contexts (in exception stacks, etc.): + */ + if (preempt_count()) + return; + + down_read(&mm->mmap_sem); + vma = find_vma(mm, ip); + if (vma && vma->vm_file) { + struct file *f = vma->vm_file; + char *buf = (char *)__get_free_page(GFP_KERNEL); + if (buf) { + char *p, *s; + + p = d_path(&f->f_path, buf, PAGE_SIZE); + if (IS_ERR(p)) + p = "?"; + s = strrchr(p, '/'); + if (s) + p = s+1; + printk("%s%s[%lx+%lx]", prefix, p, + vma->vm_start, + vma->vm_end - vma->vm_start); + free_page((unsigned long)buf); + } + } + up_read(&current->mm->mmap_sem); +} + +#ifdef CONFIG_PROVE_LOCKING +void might_fault(void) +{ + /* + * Some code (nfs/sunrpc) uses socket ops on kernel memory while + * holding the mmap_sem, this is safe because kernel memory doesn't + * get paged out, therefore we'll never actually fault, and the + * below annotations will generate false positives. + */ + if (segment_eq(get_fs(), KERNEL_DS)) + return; + + might_sleep(); + /* + * it would be nicer only to annotate paths which are not under + * pagefault_disable, however that requires a larger audit and + * providing helpers like get_user_atomic. + */ + if (!in_atomic() && current->mm) + might_lock_read(&current->mm->mmap_sem); +} +EXPORT_SYMBOL(might_fault); +#endif diff -Nur kernel-2.6.32.54/linux-2.6.32/mm/mlock.c kernel-2.6.32.54.vs/linux-2.6.32/mm/mlock.c --- kernel-2.6.32.54/linux-2.6.32/mm/mlock.c 2012-01-16 15:01:39.980725193 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/mm/mlock.c 2012-01-16 14:51:22.065408183 +0100 @@ -18,6 +18,7 @@ #include #include #include +#include #include "internal.h" @@ -414,7 +415,7 @@ nr_pages = (end - start) >> PAGE_SHIFT; if (!lock) nr_pages = -nr_pages; - mm->locked_vm += nr_pages; + vx_vmlocked_add(mm, nr_pages); /* * vm_flags is protected by the mmap_sem held in write mode.
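The vx_vmlocked_avail() check added to sys_mlock() in the next hunk sits in front of the usual RLIMIT_MEMLOCK test; the userspace-visible contract of that test is (a sketch):

#include <stdio.h>
#include <sys/mman.h>
#include <sys/resource.h>

static char buf[4 * 4096];

int main(void)
{
	struct rlimit rl;

	getrlimit(RLIMIT_MEMLOCK, &rl);
	printf("RLIMIT_MEMLOCK: %lu bytes\n", (unsigned long)rl.rlim_cur);
	if (mlock(buf, sizeof(buf)) == 0) {
		printf("locked %zu bytes\n", sizeof(buf));
		munlock(buf, sizeof(buf));
	} else {
		perror("mlock");	/* ENOMEM/EPERM once over the limit */
	}
	return 0;
}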
@@ -487,7 +488,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len) { - unsigned long locked; + unsigned long locked, grow; unsigned long lock_limit; int error = -ENOMEM; @@ -500,8 +501,10 @@ len = PAGE_ALIGN(len + (start & ~PAGE_MASK)); start &= PAGE_MASK; - locked = len >> PAGE_SHIFT; - locked += current->mm->locked_vm; + grow = len >> PAGE_SHIFT; + if (!vx_vmlocked_avail(current->mm, grow)) + goto out; + locked = current->mm->locked_vm + grow; lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur; lock_limit >>= PAGE_SHIFT; @@ -509,6 +512,7 @@ /* check against resource limits */ if ((locked <= lock_limit) || capable(CAP_IPC_LOCK)) error = do_mlock(start, len, 1); +out: + up_write(&current->mm->mmap_sem); return error; } @@ -570,6 +574,8 @@ lock_limit >>= PAGE_SHIFT; ret = -ENOMEM; + if (!vx_vmlocked_avail(current->mm, current->mm->total_vm)) + goto out; if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) || capable(CAP_IPC_LOCK)) ret = do_mlockall(flags); @@ -644,8 +650,10 @@ if (lim < vm) goto out; - mm->total_vm += pgsz; - mm->locked_vm += pgsz; + // mm->total_vm += pgsz; + vx_vmpages_add(mm, pgsz); + // mm->locked_vm += pgsz; + vx_vmlocked_add(mm, pgsz); error = 0; out: @@ -659,8 +667,10 @@ down_write(&mm->mmap_sem); - mm->total_vm -= pgsz; - mm->locked_vm -= pgsz; + // mm->total_vm -= pgsz; + vx_vmpages_sub(mm, pgsz); + // mm->locked_vm -= pgsz; + vx_vmlocked_sub(mm, pgsz); up_write(&mm->mmap_sem); } diff -Nur kernel-2.6.32.54/linux-2.6.32/mm/mmap.c kernel-2.6.32.54.vs/linux-2.6.32/mm/mmap.c --- kernel-2.6.32.54/linux-2.6.32/mm/mmap.c 2012-01-16 15:01:39.984725179 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/mm/mmap.c 2012-01-16 14:51:22.069408169 +0100 @@ -1224,7 +1224,8 @@ out: perf_event_mmap(vma); - mm->total_vm += len >> PAGE_SHIFT; + // mm->total_vm += len >> PAGE_SHIFT; + vx_vmpages_add(mm, len >> PAGE_SHIFT); vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT); if (vm_flags & VM_LOCKED) { /* @@ -1233,7 +1234,8 @@ long nr_pages = mlock_vma_pages_range(vma, addr, addr + len); if (nr_pages < 0) return nr_pages; /* vma gone!
*/ - mm->locked_vm += (len >> PAGE_SHIFT) - nr_pages; + // mm->locked_vm += (len >> PAGE_SHIFT) - nr_pages; + vx_vmlocked_add(mm, (len >> PAGE_SHIFT) - nr_pages); } else if ((flags & MAP_POPULATE) && !(flags & MAP_NONBLOCK)) make_pages_present(addr, addr + len); return addr; @@ -1588,9 +1590,9 @@ return -ENOMEM; /* Ok, everything looks good - let it rip */ - mm->total_vm += grow; + vx_vmpages_add(mm, grow); if (vma->vm_flags & VM_LOCKED) - mm->locked_vm += grow; + vx_vmlocked_add(mm, grow); vm_stat_account(mm, vma->vm_flags, vma->vm_file, grow); return 0; } @@ -1768,7 +1770,8 @@ do { long nrpages = vma_pages(vma); - mm->total_vm -= nrpages; + // mm->total_vm -= nrpages; + vx_vmpages_sub(mm, nrpages); vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages); vma = remove_vma(vma); } while (vma); @@ -1943,7 +1946,8 @@ struct vm_area_struct *tmp = vma; while (tmp && tmp->vm_start < end) { if (tmp->vm_flags & VM_LOCKED) { - mm->locked_vm -= vma_pages(tmp); + // mm->locked_vm -= vma_pages(tmp); + vx_vmlocked_sub(mm, vma_pages(tmp)); munlock_vma_pages_all(tmp); } tmp = tmp->vm_next; @@ -2026,6 +2030,8 @@ lock_limit >>= PAGE_SHIFT; if (locked > lock_limit && !capable(CAP_IPC_LOCK)) return -EAGAIN; + if (!vx_vmlocked_avail(mm, len >> PAGE_SHIFT)) + return -ENOMEM; } /* @@ -2052,7 +2058,8 @@ if (mm->map_count > sysctl_max_map_count) return -ENOMEM; - if (security_vm_enough_memory(len >> PAGE_SHIFT)) + if (security_vm_enough_memory(len >> PAGE_SHIFT) || + !vx_vmpages_avail(mm, len >> PAGE_SHIFT)) return -ENOMEM; /* Can we just expand an old private anonymous mapping? */ @@ -2078,10 +2085,13 @@ vma->vm_page_prot = vm_get_page_prot(flags); vma_link(mm, vma, prev, rb_link, rb_parent); out: - mm->total_vm += len >> PAGE_SHIFT; + // mm->total_vm += len >> PAGE_SHIFT; + vx_vmpages_add(mm, len >> PAGE_SHIFT); + if (flags & VM_LOCKED) { if (!mlock_vma_pages_range(vma, addr, addr + len)) - mm->locked_vm += (len >> PAGE_SHIFT); + // mm->locked_vm += (len >> PAGE_SHIFT); + vx_vmlocked_add(mm, len >> PAGE_SHIFT); } return addr; } @@ -2125,6 +2135,11 @@ free_pgtables(tlb, vma, FIRST_USER_ADDRESS, 0); tlb_finish_mmu(tlb, 0, end); + set_mm_counter(mm, file_rss, 0); + set_mm_counter(mm, anon_rss, 0); + vx_vmpages_sub(mm, mm->total_vm); + vx_vmlocked_sub(mm, mm->locked_vm); + /* * Walk the list again, actually closing and freeing it, * with preemption enabled, without holding any MM locks. @@ -2164,7 +2179,8 @@ if (__vma && __vma->vm_start < vma->vm_end) return -ENOMEM; if ((vma->vm_flags & VM_ACCOUNT) && - security_vm_enough_memory_mm(mm, vma_pages(vma))) + (security_vm_enough_memory_mm(mm, vma_pages(vma)) || + !vx_vmpages_avail(mm, vma_pages(vma)))) return -ENOMEM; vma_link(mm, vma, prev, rb_link, rb_parent); return 0; @@ -2240,6 +2256,8 @@ if (cur + npages > lim) return 0; + if (!vx_vmpages_avail(mm, npages)) + return 0; return 1; } @@ -2321,7 +2339,7 @@ if (ret) goto out; - mm->total_vm += len >> PAGE_SHIFT; + vx_vmpages_add(mm, len >> PAGE_SHIFT); perf_event_mmap(vma); diff -Nur kernel-2.6.32.54/linux-2.6.32/mm/mremap.c kernel-2.6.32.54.vs/linux-2.6.32/mm/mremap.c --- kernel-2.6.32.54/linux-2.6.32/mm/mremap.c 2012-01-16 15:01:39.984725179 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/mm/mremap.c 2012-01-16 14:51:22.069408169 +0100 @@ -20,6 +20,7 @@ #include #include #include +#include #include #include @@ -232,7 +233,7 @@ * If this were a serious issue, we'd add a flag to do_munmap(). 
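The growth paths being converted to vx_vmpages_add()/vx_vmlocked_add() here are driven by mremap(2); from userspace (a sketch):

#define _GNU_SOURCE
#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
	char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	char *q = mremap(p, 4096, 8 * 4096, MREMAP_MAYMOVE);

	if (q != MAP_FAILED)	/* total_vm grew by 7 pages */
		printf("grew mapping to 8 pages at %p\n", (void *)q);
	return 0;
}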
*/ hiwater_vm = mm->hiwater_vm; - mm->total_vm += new_len >> PAGE_SHIFT; + vx_vmpages_add(mm, new_len >> PAGE_SHIFT); vm_stat_account(mm, vma->vm_flags, vma->vm_file, new_len>>PAGE_SHIFT); if (do_munmap(mm, old_addr, old_len) < 0) { @@ -250,7 +251,7 @@ } if (vm_flags & VM_LOCKED) { - mm->locked_vm += new_len >> PAGE_SHIFT; + vx_vmlocked_add(mm, new_len >> PAGE_SHIFT); if (new_len > old_len) mlock_vma_pages_range(new_vma, new_addr + old_len, new_addr + new_len); @@ -468,10 +469,12 @@ vma_adjust(vma, vma->vm_start, addr + new_len, vma->vm_pgoff, NULL); - mm->total_vm += pages; + // mm->total_vm += pages; + vx_vmpages_add(mm, pages); vm_stat_account(mm, vma->vm_flags, vma->vm_file, pages); if (vma->vm_flags & VM_LOCKED) { - mm->locked_vm += pages; + // mm->locked_vm += pages; + vx_vmlocked_add(mm, pages); mlock_vma_pages_range(vma, addr + old_len, addr + new_len); } diff -Nur kernel-2.6.32.54/linux-2.6.32/mm/nommu.c kernel-2.6.32.54.vs/linux-2.6.32/mm/nommu.c --- kernel-2.6.32.54/linux-2.6.32/mm/nommu.c 2012-01-16 15:01:39.984725179 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/mm/nommu.c 2012-01-16 14:51:22.073408155 +0100 @@ -1349,7 +1349,7 @@ /* okay... we have a mapping; now we have to register it */ result = vma->vm_start; - current->mm->total_vm += len >> PAGE_SHIFT; + vx_vmpages_add(current->mm, len >> PAGE_SHIFT); share: add_vma_to_mm(current->mm, vma); @@ -1609,7 +1609,7 @@ kenter(""); - mm->total_vm = 0; + vx_vmpages_sub(mm, mm->total_vm); while ((vma = mm->mmap)) { mm->mmap = vma->vm_next; diff -Nur kernel-2.6.32.54/linux-2.6.32/mm/oom_kill.c kernel-2.6.32.54.vs/linux-2.6.32/mm/oom_kill.c --- kernel-2.6.32.54/linux-2.6.32/mm/oom_kill.c 2012-01-16 15:01:39.984725179 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/mm/oom_kill.c 2012-01-16 14:51:22.073408155 +0100 @@ -27,6 +27,9 @@ #include #include #include +#include +#include +#include int sysctl_panic_on_oom; int sysctl_oom_kill_allocating_task; @@ -186,9 +189,21 @@ points >>= -(oom_adj); } + /* + * add points for context badness and + * reduce badness for processes belonging to + * a different context + */ + + points += vx_badness(p, mm); + + if ((vx_current_xid() > 1) && + vx_current_xid() != vx_task_xid(p)) + points /= 16; + #ifdef DEBUG - printk(KERN_DEBUG "OOMkill: task %d (%s) got %lu points\n", - p->pid, p->comm, points); + printk(KERN_DEBUG "OOMkill: task %d:#%u (%s) got %d points\n", + task_pid_nr(p), p->xid, p->comm, points); #endif return points; } @@ -230,6 +245,7 @@ struct task_struct *p; struct task_struct *chosen = NULL; struct timespec uptime; + unsigned xid = vx_current_xid(); *ppoints = 0; do_posix_clock_monotonic_gettime(&uptime); @@ -242,11 +258,14 @@ */ if (!p->mm) continue; - /* skip the init task */ - if (is_global_init(p)) + /* skip the init task, global and per guest */ + if (task_is_init(p)) continue; if (mem && !task_in_mem_cgroup(p, mem)) continue; + /* skip other guest and host processes if oom in guest */ + if (xid && vx_task_xid(p) != xid) + continue; /* * This task already has access to memory reserves and is @@ -357,8 +376,8 @@ } if (verbose) - printk(KERN_ERR "Killed process %d (%s)\n", - task_pid_nr(p), p->comm); + printk(KERN_ERR "Killed process %s(%d:#%u)\n", + p->comm, task_pid_nr(p), p->xid); /* * We give our sacrificial lamb high priority and access to @@ -419,8 +438,8 @@ return 0; } - printk(KERN_ERR "%s: kill process %d (%s) score %li or a child\n", - message, task_pid_nr(p), p->comm, points); + printk(KERN_ERR "%s: kill process %s(%d:#%u) score %li or a child\n", + message, p->comm, 
task_pid_nr(p), p->xid, points); /* Try to kill a child first */ list_for_each_entry(c, &p->children, sibling) { @@ -521,6 +540,8 @@ spin_unlock(&zone_scan_lock); } +long vs_oom_action(unsigned int); + /* * Must be called with tasklist_lock held for read. */ @@ -546,7 +567,11 @@ /* Found nothing?!?! Either we hang forever, or we panic. */ if (!p) { read_unlock(&tasklist_lock); - panic("Out of memory and no killable processes...\n"); + /* avoid panic for guest OOM */ + if (current->xid) + vs_oom_action(LINUX_REBOOT_CMD_OOM); + else + panic("Out of memory and no killable processes...\n"); } if (oom_kill_process(p, gfp_mask, order, points, NULL, diff -Nur kernel-2.6.32.54/linux-2.6.32/mm/page_alloc.c kernel-2.6.32.54.vs/linux-2.6.32/mm/page_alloc.c --- kernel-2.6.32.54/linux-2.6.32/mm/page_alloc.c 2012-01-16 15:01:40.312724018 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/mm/page_alloc.c 2012-01-16 14:51:22.077408141 +0100 @@ -48,6 +48,8 @@ #include #include #include +#include +#include #include #include @@ -2173,6 +2175,9 @@ val->totalhigh = totalhigh_pages; val->freehigh = nr_free_highpages(); val->mem_unit = PAGE_SIZE; + + if (vx_flags(VXF_VIRT_MEM, 0)) + vx_vsi_meminfo(val); } EXPORT_SYMBOL(si_meminfo); @@ -2193,6 +2198,9 @@ val->freehigh = 0; #endif val->mem_unit = PAGE_SIZE; + + if (vx_flags(VXF_VIRT_MEM, 0)) + vx_vsi_meminfo(val); } #endif diff -Nur kernel-2.6.32.54/linux-2.6.32/mm/page_alloc.c.orig kernel-2.6.32.54.vs/linux-2.6.32/mm/page_alloc.c.orig --- kernel-2.6.32.54/linux-2.6.32/mm/page_alloc.c.orig 2012-01-16 15:01:39.984725179 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/mm/page_alloc.c.orig 2012-01-16 14:47:19.502254760 +0100 @@ -2112,6 +2112,26 @@ return sum; } +static unsigned int nr_unallocated_zone_pages(int offset) +{ + struct zoneref *z; + struct zone *zone; + + /* Just pick one node, since fallback list is circular */ + unsigned int sum = 0; + + struct zonelist *zonelist = node_zonelist(numa_node_id(), GFP_KERNEL); + + for_each_zone_zonelist(zone, z, zonelist, offset) { + unsigned long high = high_wmark_pages(zone); + unsigned long left = zone_page_state(zone, NR_FREE_PAGES); + if (left > high) + sum += left - high; + } + + return sum; +} + /* * Amount of free RAM allocatable within ZONE_DMA and ZONE_NORMAL */ @@ -2122,6 +2142,15 @@ EXPORT_SYMBOL_GPL(nr_free_buffer_pages); /* + * Amount of free RAM allocatable within ZONE_DMA and ZONE_NORMAL + */ +unsigned int nr_unallocated_buffer_pages(void) +{ + return nr_unallocated_zone_pages(gfp_zone(GFP_USER)); +} +EXPORT_SYMBOL_GPL(nr_unallocated_buffer_pages); + +/* * Amount of free RAM allocatable within all zones */ unsigned int nr_free_pagecache_pages(void) diff -Nur kernel-2.6.32.54/linux-2.6.32/mm/rmap.c kernel-2.6.32.54.vs/linux-2.6.32/mm/rmap.c --- kernel-2.6.32.54/linux-2.6.32/mm/rmap.c 2009-12-03 04:51:21.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/mm/rmap.c 2012-01-16 14:51:22.077408141 +0100 @@ -55,6 +55,7 @@ #include #include #include +#include #include diff -Nur kernel-2.6.32.54/linux-2.6.32/mm/shmem.c kernel-2.6.32.54.vs/linux-2.6.32/mm/shmem.c --- kernel-2.6.32.54/linux-2.6.32/mm/shmem.c 2012-01-16 15:01:40.312724018 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/mm/shmem.c 2012-01-16 14:51:22.077408141 +0100 @@ -1783,7 +1783,7 @@ { struct shmem_sb_info *sbinfo = SHMEM_SB(dentry->d_sb); - buf->f_type = TMPFS_MAGIC; + buf->f_type = TMPFS_SUPER_MAGIC; buf->f_bsize = PAGE_CACHE_SIZE; buf->f_namelen = NAME_MAX; spin_lock(&sbinfo->stat_lock); @@ -2348,7 +2348,7 @@ sb->s_maxbytes = SHMEM_MAX_BYTES; sb->s_blocksize = 
PAGE_CACHE_SIZE; sb->s_blocksize_bits = PAGE_CACHE_SHIFT; - sb->s_magic = TMPFS_MAGIC; + sb->s_magic = TMPFS_SUPER_MAGIC; sb->s_op = &shmem_ops; sb->s_time_gran = 1; #ifdef CONFIG_TMPFS_POSIX_ACL diff -Nur kernel-2.6.32.54/linux-2.6.32/mm/shmem.c.orig kernel-2.6.32.54.vs/linux-2.6.32/mm/shmem.c.orig --- kernel-2.6.32.54/linux-2.6.32/mm/shmem.c.orig 1970-01-01 01:00:00.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/mm/shmem.c.orig 2012-01-16 14:47:19.502254760 +0100 @@ -0,0 +1,2698 @@ +/* + * Resizable virtual memory filesystem for Linux. + * + * Copyright (C) 2000 Linus Torvalds. + * 2000 Transmeta Corp. + * 2000-2001 Christoph Rohland + * 2000-2001 SAP AG + * 2002 Red Hat Inc. + * Copyright (C) 2002-2005 Hugh Dickins. + * Copyright (C) 2002-2005 VERITAS Software Corporation. + * Copyright (C) 2004 Andi Kleen, SuSE Labs + * + * Extended attribute support for tmpfs: + * Copyright (c) 2004, Luke Kenneth Casson Leighton + * Copyright (c) 2004 Red Hat, Inc., James Morris + * + * tiny-shmem: + * Copyright (c) 2004, 2008 Matt Mackall + * + * This file is released under the GPL. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static struct vfsmount *shm_mnt; + +#ifdef CONFIG_SHMEM +/* + * This virtual memory filesystem is heavily based on the ramfs. It + * extends ramfs by the ability to use swap and honor resource limits + * which makes it a completely usable filesystem. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +/* + * The maximum size of a shmem/tmpfs file is limited by the maximum size of + * its triple-indirect swap vector - see illustration at shmem_swp_entry(). + * + * With 4kB page size, maximum file size is just over 2TB on a 32-bit kernel, + * but one eighth of that on a 64-bit kernel. With 8kB page size, maximum + * file size is just over 4TB on a 64-bit kernel, but 16TB on a 32-bit kernel, + * MAX_LFS_FILESIZE being then more restrictive than swap vector layout. + * + * We use / and * instead of shifts in the definitions below, so that the swap + * vector can be tested with small even values (e.g. 20) for ENTRIES_PER_PAGE. 
+ */ +#define ENTRIES_PER_PAGE (PAGE_CACHE_SIZE/sizeof(unsigned long)) +#define ENTRIES_PER_PAGEPAGE ((unsigned long long)ENTRIES_PER_PAGE*ENTRIES_PER_PAGE) + +#define SHMSWP_MAX_INDEX (SHMEM_NR_DIRECT + (ENTRIES_PER_PAGEPAGE/2) * (ENTRIES_PER_PAGE+1)) +#define SHMSWP_MAX_BYTES (SHMSWP_MAX_INDEX << PAGE_CACHE_SHIFT) + +#define SHMEM_MAX_BYTES min_t(unsigned long long, SHMSWP_MAX_BYTES, MAX_LFS_FILESIZE) +#define SHMEM_MAX_INDEX ((unsigned long)((SHMEM_MAX_BYTES+1) >> PAGE_CACHE_SHIFT)) + +#define BLOCKS_PER_PAGE (PAGE_CACHE_SIZE/512) +#define VM_ACCT(size) (PAGE_CACHE_ALIGN(size) >> PAGE_SHIFT) + +/* info->flags needs VM_flags to handle pagein/truncate races efficiently */ +#define SHMEM_PAGEIN VM_READ +#define SHMEM_TRUNCATE VM_WRITE + +/* Definition to limit shmem_truncate's steps between cond_rescheds */ +#define LATENCY_LIMIT 64 + +/* Pretend that each entry is of this size in directory's i_size */ +#define BOGO_DIRENT_SIZE 20 + +/* Flag allocation requirements to shmem_getpage and shmem_swp_alloc */ +enum sgp_type { + SGP_READ, /* don't exceed i_size, don't allocate page */ + SGP_CACHE, /* don't exceed i_size, may allocate page */ + SGP_DIRTY, /* like SGP_CACHE, but set new page dirty */ + SGP_WRITE, /* may exceed i_size, may allocate page */ +}; + +#ifdef CONFIG_TMPFS +static unsigned long shmem_default_max_blocks(void) +{ + return totalram_pages / 2; +} + +static unsigned long shmem_default_max_inodes(void) +{ + return min(totalram_pages - totalhigh_pages, totalram_pages / 2); +} +#endif + +static int shmem_getpage(struct inode *inode, unsigned long idx, + struct page **pagep, enum sgp_type sgp, int *type); + +static inline struct page *shmem_dir_alloc(gfp_t gfp_mask) +{ + /* + * The above definition of ENTRIES_PER_PAGE, and the use of + * BLOCKS_PER_PAGE on indirect pages, assume PAGE_CACHE_SIZE: + * might be reconsidered if it ever diverges from PAGE_SIZE. + * + * Mobility flags are masked out as swap vectors cannot move + */ + return alloc_pages((gfp_mask & ~GFP_MOVABLE_MASK) | __GFP_ZERO, + PAGE_CACHE_SHIFT-PAGE_SHIFT); +} + +static inline void shmem_dir_free(struct page *page) +{ + __free_pages(page, PAGE_CACHE_SHIFT-PAGE_SHIFT); +} + +static struct page **shmem_dir_map(struct page *page) +{ + return (struct page **)kmap_atomic(page, KM_USER0); +} + +static inline void shmem_dir_unmap(struct page **dir) +{ + kunmap_atomic(dir, KM_USER0); +} + +static swp_entry_t *shmem_swp_map(struct page *page) +{ + return (swp_entry_t *)kmap_atomic(page, KM_USER1); +} + +static inline void shmem_swp_balance_unmap(void) +{ + /* + * When passing a pointer to an i_direct entry, to code which + * also handles indirect entries and so will shmem_swp_unmap, + * we must arrange for the preempt count to remain in balance. + * What kmap_atomic of a lowmem page does depends on config + * and architecture, so pretend to kmap_atomic some lowmem page. + */ + (void) kmap_atomic(ZERO_PAGE(0), KM_USER1); +} + +static inline void shmem_swp_unmap(swp_entry_t *entry) +{ + kunmap_atomic(entry, KM_USER1); +} + +static inline struct shmem_sb_info *SHMEM_SB(struct super_block *sb) +{ + return sb->s_fs_info; +} + +/* + * shmem_file_setup pre-accounts the whole fixed size of a VM object, + * for shared memory and for shared anonymous (/dev/zero) mappings + * (unless MAP_NORESERVE and sysctl_overcommit_memory <= 1), + * consistent with the pre-accounting of private mappings ... + */ +static inline int shmem_acct_size(unsigned long flags, loff_t size) +{ + return (flags & VM_NORESERVE) ? 
+ 0 : security_vm_enough_memory_kern(VM_ACCT(size)); +} + +static inline void shmem_unacct_size(unsigned long flags, loff_t size) +{ + if (!(flags & VM_NORESERVE)) + vm_unacct_memory(VM_ACCT(size)); +} + +/* + * ... whereas tmpfs objects are accounted incrementally as + * pages are allocated, in order to allow huge sparse files. + * shmem_getpage reports shmem_acct_block failure as -ENOSPC not -ENOMEM, + * so that a failure on a sparse tmpfs mapping will give SIGBUS not OOM. + */ +static inline int shmem_acct_block(unsigned long flags) +{ + return (flags & VM_NORESERVE) ? + security_vm_enough_memory_kern(VM_ACCT(PAGE_CACHE_SIZE)) : 0; +} + +static inline void shmem_unacct_blocks(unsigned long flags, long pages) +{ + if (flags & VM_NORESERVE) + vm_unacct_memory(pages * VM_ACCT(PAGE_CACHE_SIZE)); +} + +static const struct super_operations shmem_ops; +static const struct address_space_operations shmem_aops; +static const struct file_operations shmem_file_operations; +static const struct inode_operations shmem_inode_operations; +static const struct inode_operations shmem_dir_inode_operations; +static const struct inode_operations shmem_special_inode_operations; +static const struct vm_operations_struct shmem_vm_ops; + +static struct backing_dev_info shmem_backing_dev_info __read_mostly = { + .ra_pages = 0, /* No readahead */ + .capabilities = BDI_CAP_NO_ACCT_AND_WRITEBACK | BDI_CAP_SWAP_BACKED, + .unplug_io_fn = default_unplug_io_fn, +}; + +static LIST_HEAD(shmem_swaplist); +static DEFINE_MUTEX(shmem_swaplist_mutex); + +static void shmem_free_blocks(struct inode *inode, long pages) +{ + struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb); + if (sbinfo->max_blocks) { + spin_lock(&sbinfo->stat_lock); + sbinfo->free_blocks += pages; + inode->i_blocks -= pages*BLOCKS_PER_PAGE; + spin_unlock(&sbinfo->stat_lock); + } +} + +static int shmem_reserve_inode(struct super_block *sb) +{ + struct shmem_sb_info *sbinfo = SHMEM_SB(sb); + if (sbinfo->max_inodes) { + spin_lock(&sbinfo->stat_lock); + if (!sbinfo->free_inodes) { + spin_unlock(&sbinfo->stat_lock); + return -ENOSPC; + } + sbinfo->free_inodes--; + spin_unlock(&sbinfo->stat_lock); + } + return 0; +} + +static void shmem_free_inode(struct super_block *sb) +{ + struct shmem_sb_info *sbinfo = SHMEM_SB(sb); + if (sbinfo->max_inodes) { + spin_lock(&sbinfo->stat_lock); + sbinfo->free_inodes++; + spin_unlock(&sbinfo->stat_lock); + } +} + +/** + * shmem_recalc_inode - recalculate the size of an inode + * @inode: inode to recalc + * + * We have to calculate the free blocks since the mm can drop + * undirtied hole pages behind our back. + * + * But normally info->alloced == inode->i_mapping->nrpages + info->swapped + * So mm freed is info->alloced - (inode->i_mapping->nrpages + info->swapped) + * + * It has to be called with the spinlock held. + */ +static void shmem_recalc_inode(struct inode *inode) +{ + struct shmem_inode_info *info = SHMEM_I(inode); + long freed; + + freed = info->alloced - info->swapped - inode->i_mapping->nrpages; + if (freed > 0) { + info->alloced -= freed; + shmem_unacct_blocks(info->flags, freed); + shmem_free_blocks(inode, freed); + } +} + +/** + * shmem_swp_entry - find the swap vector position in the info structure + * @info: info structure for the inode + * @index: index of the page to find + * @page: optional page to add to the structure. 
Has to be preset to + * all zeros + * + * If there is no space allocated yet it will return NULL when + * page is NULL, else it will use the page for the needed block, + * setting it to NULL on return to indicate that it has been used. + * + * The swap vector is organized the following way: + * + * There are SHMEM_NR_DIRECT entries directly stored in the + * shmem_inode_info structure. So small files do not need an addional + * allocation. + * + * For pages with index > SHMEM_NR_DIRECT there is the pointer + * i_indirect which points to a page which holds in the first half + * doubly indirect blocks, in the second half triple indirect blocks: + * + * For an artificial ENTRIES_PER_PAGE = 4 this would lead to the + * following layout (for SHMEM_NR_DIRECT == 16): + * + * i_indirect -> dir --> 16-19 + * | +-> 20-23 + * | + * +-->dir2 --> 24-27 + * | +-> 28-31 + * | +-> 32-35 + * | +-> 36-39 + * | + * +-->dir3 --> 40-43 + * +-> 44-47 + * +-> 48-51 + * +-> 52-55 + */ +static swp_entry_t *shmem_swp_entry(struct shmem_inode_info *info, unsigned long index, struct page **page) +{ + unsigned long offset; + struct page **dir; + struct page *subdir; + + if (index < SHMEM_NR_DIRECT) { + shmem_swp_balance_unmap(); + return info->i_direct+index; + } + if (!info->i_indirect) { + if (page) { + info->i_indirect = *page; + *page = NULL; + } + return NULL; /* need another page */ + } + + index -= SHMEM_NR_DIRECT; + offset = index % ENTRIES_PER_PAGE; + index /= ENTRIES_PER_PAGE; + dir = shmem_dir_map(info->i_indirect); + + if (index >= ENTRIES_PER_PAGE/2) { + index -= ENTRIES_PER_PAGE/2; + dir += ENTRIES_PER_PAGE/2 + index/ENTRIES_PER_PAGE; + index %= ENTRIES_PER_PAGE; + subdir = *dir; + if (!subdir) { + if (page) { + *dir = *page; + *page = NULL; + } + shmem_dir_unmap(dir); + return NULL; /* need another page */ + } + shmem_dir_unmap(dir); + dir = shmem_dir_map(subdir); + } + + dir += index; + subdir = *dir; + if (!subdir) { + if (!page || !(subdir = *page)) { + shmem_dir_unmap(dir); + return NULL; /* need a page */ + } + *dir = subdir; + *page = NULL; + } + shmem_dir_unmap(dir); + return shmem_swp_map(subdir) + offset; +} + +static void shmem_swp_set(struct shmem_inode_info *info, swp_entry_t *entry, unsigned long value) +{ + long incdec = value? 1: -1; + + entry->val = value; + info->swapped += incdec; + if ((unsigned long)(entry - info->i_direct) >= SHMEM_NR_DIRECT) { + struct page *page = kmap_atomic_to_page(entry); + set_page_private(page, page_private(page) + incdec); + } +} + +/** + * shmem_swp_alloc - get the position of the swap entry for the page. + * @info: info structure for the inode + * @index: index of the page to find + * @sgp: check and recheck i_size? skip allocation? + * + * If the entry does not exist, allocate it. + */ +static swp_entry_t *shmem_swp_alloc(struct shmem_inode_info *info, unsigned long index, enum sgp_type sgp) +{ + struct inode *inode = &info->vfs_inode; + struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb); + struct page *page = NULL; + swp_entry_t *entry; + + if (sgp != SGP_WRITE && + ((loff_t) index << PAGE_CACHE_SHIFT) >= i_size_read(inode)) + return ERR_PTR(-EINVAL); + + while (!(entry = shmem_swp_entry(info, index, &page))) { + if (sgp == SGP_READ) + return shmem_swp_map(ZERO_PAGE(0)); + /* + * Test free_blocks against 1 not 0, since we have 1 data + * page (and perhaps indirect index pages) yet to allocate: + * a waste to allocate index if we cannot allocate data. 
+ */ + if (sbinfo->max_blocks) { + spin_lock(&sbinfo->stat_lock); + if (sbinfo->free_blocks <= 1) { + spin_unlock(&sbinfo->stat_lock); + return ERR_PTR(-ENOSPC); + } + sbinfo->free_blocks--; + inode->i_blocks += BLOCKS_PER_PAGE; + spin_unlock(&sbinfo->stat_lock); + } + + spin_unlock(&info->lock); + page = shmem_dir_alloc(mapping_gfp_mask(inode->i_mapping)); + if (page) + set_page_private(page, 0); + spin_lock(&info->lock); + + if (!page) { + shmem_free_blocks(inode, 1); + return ERR_PTR(-ENOMEM); + } + if (sgp != SGP_WRITE && + ((loff_t) index << PAGE_CACHE_SHIFT) >= i_size_read(inode)) { + entry = ERR_PTR(-EINVAL); + break; + } + if (info->next_index <= index) + info->next_index = index + 1; + } + if (page) { + /* another task gave its page, or truncated the file */ + shmem_free_blocks(inode, 1); + shmem_dir_free(page); + } + if (info->next_index <= index && !IS_ERR(entry)) + info->next_index = index + 1; + return entry; +} + +/** + * shmem_free_swp - free some swap entries in a directory + * @dir: pointer to the directory + * @edir: pointer after last entry of the directory + * @punch_lock: pointer to spinlock when needed for the holepunch case + */ +static int shmem_free_swp(swp_entry_t *dir, swp_entry_t *edir, + spinlock_t *punch_lock) +{ + spinlock_t *punch_unlock = NULL; + swp_entry_t *ptr; + int freed = 0; + + for (ptr = dir; ptr < edir; ptr++) { + if (ptr->val) { + if (unlikely(punch_lock)) { + punch_unlock = punch_lock; + punch_lock = NULL; + spin_lock(punch_unlock); + if (!ptr->val) + continue; + } + free_swap_and_cache(*ptr); + *ptr = (swp_entry_t){0}; + freed++; + } + } + if (punch_unlock) + spin_unlock(punch_unlock); + return freed; +} + +static int shmem_map_and_free_swp(struct page *subdir, int offset, + int limit, struct page ***dir, spinlock_t *punch_lock) +{ + swp_entry_t *ptr; + int freed = 0; + + ptr = shmem_swp_map(subdir); + for (; offset < limit; offset += LATENCY_LIMIT) { + int size = limit - offset; + if (size > LATENCY_LIMIT) + size = LATENCY_LIMIT; + freed += shmem_free_swp(ptr+offset, ptr+offset+size, + punch_lock); + if (need_resched()) { + shmem_swp_unmap(ptr); + if (*dir) { + shmem_dir_unmap(*dir); + *dir = NULL; + } + cond_resched(); + ptr = shmem_swp_map(subdir); + } + } + shmem_swp_unmap(ptr); + return freed; +} + +static void shmem_free_pages(struct list_head *next) +{ + struct page *page; + int freed = 0; + + do { + page = container_of(next, struct page, lru); + next = next->next; + shmem_dir_free(page); + freed++; + if (freed >= LATENCY_LIMIT) { + cond_resched(); + freed = 0; + } + } while (next); +} + +static void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end) +{ + struct shmem_inode_info *info = SHMEM_I(inode); + unsigned long idx; + unsigned long size; + unsigned long limit; + unsigned long stage; + unsigned long diroff; + struct page **dir; + struct page *topdir; + struct page *middir; + struct page *subdir; + swp_entry_t *ptr; + LIST_HEAD(pages_to_free); + long nr_pages_to_free = 0; + long nr_swaps_freed = 0; + int offset; + int freed; + int punch_hole; + spinlock_t *needs_lock; + spinlock_t *punch_lock; + unsigned long upper_limit; + + inode->i_ctime = inode->i_mtime = CURRENT_TIME; + idx = (start + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; + if (idx >= info->next_index) + return; + + spin_lock(&info->lock); + info->flags |= SHMEM_TRUNCATE; + if (likely(end == (loff_t) -1)) { + limit = info->next_index; + upper_limit = SHMEM_MAX_INDEX; + info->next_index = idx; + needs_lock = NULL; + punch_hole = 0; + } else { + if (end + 1 >= 
inode->i_size) { /* we may free a little more */ + limit = (inode->i_size + PAGE_CACHE_SIZE - 1) >> + PAGE_CACHE_SHIFT; + upper_limit = SHMEM_MAX_INDEX; + } else { + limit = (end + 1) >> PAGE_CACHE_SHIFT; + upper_limit = limit; + } + needs_lock = &info->lock; + punch_hole = 1; + } + + topdir = info->i_indirect; + if (topdir && idx <= SHMEM_NR_DIRECT && !punch_hole) { + info->i_indirect = NULL; + nr_pages_to_free++; + list_add(&topdir->lru, &pages_to_free); + } + spin_unlock(&info->lock); + + if (info->swapped && idx < SHMEM_NR_DIRECT) { + ptr = info->i_direct; + size = limit; + if (size > SHMEM_NR_DIRECT) + size = SHMEM_NR_DIRECT; + nr_swaps_freed = shmem_free_swp(ptr+idx, ptr+size, needs_lock); + } + + /* + * If there are no indirect blocks or we are punching a hole + * below indirect blocks, nothing to be done. + */ + if (!topdir || limit <= SHMEM_NR_DIRECT) + goto done2; + + /* + * The truncation case has already dropped info->lock, and we're safe + * because i_size and next_index have already been lowered, preventing + * access beyond. But in the punch_hole case, we still need to take + * the lock when updating the swap directory, because there might be + * racing accesses by shmem_getpage(SGP_CACHE), shmem_unuse_inode or + * shmem_writepage. However, whenever we find we can remove a whole + * directory page (not at the misaligned start or end of the range), + * we first NULLify its pointer in the level above, and then have no + * need to take the lock when updating its contents: needs_lock and + * punch_lock (either pointing to info->lock or NULL) manage this. + */ + + upper_limit -= SHMEM_NR_DIRECT; + limit -= SHMEM_NR_DIRECT; + idx = (idx > SHMEM_NR_DIRECT)? (idx - SHMEM_NR_DIRECT): 0; + offset = idx % ENTRIES_PER_PAGE; + idx -= offset; + + dir = shmem_dir_map(topdir); + stage = ENTRIES_PER_PAGEPAGE/2; + if (idx < ENTRIES_PER_PAGEPAGE/2) { + middir = topdir; + diroff = idx/ENTRIES_PER_PAGE; + } else { + dir += ENTRIES_PER_PAGE/2; + dir += (idx - ENTRIES_PER_PAGEPAGE/2)/ENTRIES_PER_PAGEPAGE; + while (stage <= idx) + stage += ENTRIES_PER_PAGEPAGE; + middir = *dir; + if (*dir) { + diroff = ((idx - ENTRIES_PER_PAGEPAGE/2) % + ENTRIES_PER_PAGEPAGE) / ENTRIES_PER_PAGE; + if (!diroff && !offset && upper_limit >= stage) { + if (needs_lock) { + spin_lock(needs_lock); + *dir = NULL; + spin_unlock(needs_lock); + needs_lock = NULL; + } else + *dir = NULL; + nr_pages_to_free++; + list_add(&middir->lru, &pages_to_free); + } + shmem_dir_unmap(dir); + dir = shmem_dir_map(middir); + } else { + diroff = 0; + offset = 0; + idx = stage; + } + } + + for (; idx < limit; idx += ENTRIES_PER_PAGE, diroff++) { + if (unlikely(idx == stage)) { + shmem_dir_unmap(dir); + dir = shmem_dir_map(topdir) + + ENTRIES_PER_PAGE/2 + idx/ENTRIES_PER_PAGEPAGE; + while (!*dir) { + dir++; + idx += ENTRIES_PER_PAGEPAGE; + if (idx >= limit) + goto done1; + } + stage = idx + ENTRIES_PER_PAGEPAGE; + middir = *dir; + if (punch_hole) + needs_lock = &info->lock; + if (upper_limit >= stage) { + if (needs_lock) { + spin_lock(needs_lock); + *dir = NULL; + spin_unlock(needs_lock); + needs_lock = NULL; + } else + *dir = NULL; + nr_pages_to_free++; + list_add(&middir->lru, &pages_to_free); + } + shmem_dir_unmap(dir); + cond_resched(); + dir = shmem_dir_map(middir); + diroff = 0; + } + punch_lock = needs_lock; + subdir = dir[diroff]; + if (subdir && !offset && upper_limit-idx >= ENTRIES_PER_PAGE) { + if (needs_lock) { + spin_lock(needs_lock); + dir[diroff] = NULL; + spin_unlock(needs_lock); + punch_lock = NULL; + } else + dir[diroff] = 
NULL; + nr_pages_to_free++; + list_add(&subdir->lru, &pages_to_free); + } + if (subdir && page_private(subdir) /* has swap entries */) { + size = limit - idx; + if (size > ENTRIES_PER_PAGE) + size = ENTRIES_PER_PAGE; + freed = shmem_map_and_free_swp(subdir, + offset, size, &dir, punch_lock); + if (!dir) + dir = shmem_dir_map(middir); + nr_swaps_freed += freed; + if (offset || punch_lock) { + spin_lock(&info->lock); + set_page_private(subdir, + page_private(subdir) - freed); + spin_unlock(&info->lock); + } else + BUG_ON(page_private(subdir) != freed); + } + offset = 0; + } +done1: + shmem_dir_unmap(dir); +done2: + if (inode->i_mapping->nrpages && (info->flags & SHMEM_PAGEIN)) { + /* + * Call truncate_inode_pages again: racing shmem_unuse_inode + * may have swizzled a page in from swap since vmtruncate or + * generic_delete_inode did it, before we lowered next_index. + * Also, though shmem_getpage checks i_size before adding to + * cache, no recheck after: so fix the narrow window there too. + * + * Recalling truncate_inode_pages_range and unmap_mapping_range + * every time for punch_hole (which never got a chance to clear + * SHMEM_PAGEIN at the start of vmtruncate_range) is expensive, + * yet hardly ever necessary: try to optimize them out later. + */ + truncate_inode_pages_range(inode->i_mapping, start, end); + if (punch_hole) + unmap_mapping_range(inode->i_mapping, start, + end - start, 1); + } + + spin_lock(&info->lock); + info->flags &= ~SHMEM_TRUNCATE; + info->swapped -= nr_swaps_freed; + if (nr_pages_to_free) + shmem_free_blocks(inode, nr_pages_to_free); + shmem_recalc_inode(inode); + spin_unlock(&info->lock); + + /* + * Empty swap vector directory pages to be freed? + */ + if (!list_empty(&pages_to_free)) { + pages_to_free.prev->next = NULL; + shmem_free_pages(pages_to_free.next); + } +} + +static void shmem_truncate(struct inode *inode) +{ + shmem_truncate_range(inode, inode->i_size, (loff_t)-1); +} + +static int shmem_notify_change(struct dentry *dentry, struct iattr *attr) +{ + struct inode *inode = dentry->d_inode; + struct page *page = NULL; + int error; + + if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) { + if (attr->ia_size < inode->i_size) { + /* + * If truncating down to a partial page, then + * if that page is already allocated, hold it + * in memory until the truncation is over, so + * truncate_partial_page cannnot miss it were + * it assigned to swap. + */ + if (attr->ia_size & (PAGE_CACHE_SIZE-1)) { + (void) shmem_getpage(inode, + attr->ia_size>>PAGE_CACHE_SHIFT, + &page, SGP_READ, NULL); + if (page) + unlock_page(page); + } + /* + * Reset SHMEM_PAGEIN flag so that shmem_truncate can + * detect if any pages might have been added to cache + * after truncate_inode_pages. But we needn't bother + * if it's being fully truncated to zero-length: the + * nrpages check is efficient enough in that case. 
+ */ + if (attr->ia_size) { + struct shmem_inode_info *info = SHMEM_I(inode); + spin_lock(&info->lock); + info->flags &= ~SHMEM_PAGEIN; + spin_unlock(&info->lock); + } + } + } + + error = inode_change_ok(inode, attr); + if (!error) + error = inode_setattr(inode, attr); +#ifdef CONFIG_TMPFS_POSIX_ACL + if (!error && (attr->ia_valid & ATTR_MODE)) + error = generic_acl_chmod(inode, &shmem_acl_ops); +#endif + if (page) + page_cache_release(page); + return error; +} + +static void shmem_delete_inode(struct inode *inode) +{ + struct shmem_inode_info *info = SHMEM_I(inode); + + if (inode->i_op->truncate == shmem_truncate) { + truncate_inode_pages(inode->i_mapping, 0); + shmem_unacct_size(info->flags, inode->i_size); + inode->i_size = 0; + shmem_truncate(inode); + if (!list_empty(&info->swaplist)) { + mutex_lock(&shmem_swaplist_mutex); + list_del_init(&info->swaplist); + mutex_unlock(&shmem_swaplist_mutex); + } + } + BUG_ON(inode->i_blocks); + shmem_free_inode(inode->i_sb); + clear_inode(inode); +} + +static inline int shmem_find_swp(swp_entry_t entry, swp_entry_t *dir, swp_entry_t *edir) +{ + swp_entry_t *ptr; + + for (ptr = dir; ptr < edir; ptr++) { + if (ptr->val == entry.val) + return ptr - dir; + } + return -1; +} + +static int shmem_unuse_inode(struct shmem_inode_info *info, swp_entry_t entry, struct page *page) +{ + struct inode *inode; + unsigned long idx; + unsigned long size; + unsigned long limit; + unsigned long stage; + struct page **dir; + struct page *subdir; + swp_entry_t *ptr; + int offset; + int error; + + idx = 0; + ptr = info->i_direct; + spin_lock(&info->lock); + if (!info->swapped) { + list_del_init(&info->swaplist); + goto lost2; + } + limit = info->next_index; + size = limit; + if (size > SHMEM_NR_DIRECT) + size = SHMEM_NR_DIRECT; + offset = shmem_find_swp(entry, ptr, ptr+size); + if (offset >= 0) + goto found; + if (!info->i_indirect) + goto lost2; + + dir = shmem_dir_map(info->i_indirect); + stage = SHMEM_NR_DIRECT + ENTRIES_PER_PAGEPAGE/2; + + for (idx = SHMEM_NR_DIRECT; idx < limit; idx += ENTRIES_PER_PAGE, dir++) { + if (unlikely(idx == stage)) { + shmem_dir_unmap(dir-1); + if (cond_resched_lock(&info->lock)) { + /* check it has not been truncated */ + if (limit > info->next_index) { + limit = info->next_index; + if (idx >= limit) + goto lost2; + } + } + dir = shmem_dir_map(info->i_indirect) + + ENTRIES_PER_PAGE/2 + idx/ENTRIES_PER_PAGEPAGE; + while (!*dir) { + dir++; + idx += ENTRIES_PER_PAGEPAGE; + if (idx >= limit) + goto lost1; + } + stage = idx + ENTRIES_PER_PAGEPAGE; + subdir = *dir; + shmem_dir_unmap(dir); + dir = shmem_dir_map(subdir); + } + subdir = *dir; + if (subdir && page_private(subdir)) { + ptr = shmem_swp_map(subdir); + size = limit - idx; + if (size > ENTRIES_PER_PAGE) + size = ENTRIES_PER_PAGE; + offset = shmem_find_swp(entry, ptr, ptr+size); + shmem_swp_unmap(ptr); + if (offset >= 0) { + shmem_dir_unmap(dir); + goto found; + } + } + } +lost1: + shmem_dir_unmap(dir-1); +lost2: + spin_unlock(&info->lock); + return 0; +found: + idx += offset; + inode = igrab(&info->vfs_inode); + spin_unlock(&info->lock); + + /* + * Move _head_ to start search for next from here. + * But be careful: shmem_delete_inode checks list_empty without taking + * mutex, and there's an instant in list_move_tail when info->swaplist + * would appear empty, if it were the only one on shmem_swaplist. We + * could avoid doing it if inode NULL; or use this minor optimization. 
+ */ + if (shmem_swaplist.next != &info->swaplist) + list_move_tail(&shmem_swaplist, &info->swaplist); + mutex_unlock(&shmem_swaplist_mutex); + + error = 1; + if (!inode) + goto out; + /* + * Charge page using GFP_KERNEL while we can wait. + * Charged back to the user(not to caller) when swap account is used. + * add_to_page_cache() will be called with GFP_NOWAIT. + */ + error = mem_cgroup_cache_charge(page, current->mm, GFP_KERNEL); + if (error) + goto out; + error = radix_tree_preload(GFP_KERNEL); + if (error) { + mem_cgroup_uncharge_cache_page(page); + goto out; + } + error = 1; + + spin_lock(&info->lock); + ptr = shmem_swp_entry(info, idx, NULL); + if (ptr && ptr->val == entry.val) { + error = add_to_page_cache_locked(page, inode->i_mapping, + idx, GFP_NOWAIT); + /* does mem_cgroup_uncharge_cache_page on error */ + } else /* we must compensate for our precharge above */ + mem_cgroup_uncharge_cache_page(page); + + if (error == -EEXIST) { + struct page *filepage = find_get_page(inode->i_mapping, idx); + error = 1; + if (filepage) { + /* + * There might be a more uptodate page coming down + * from a stacked writepage: forget our swappage if so. + */ + if (PageUptodate(filepage)) + error = 0; + page_cache_release(filepage); + } + } + if (!error) { + delete_from_swap_cache(page); + set_page_dirty(page); + info->flags |= SHMEM_PAGEIN; + shmem_swp_set(info, ptr, 0); + swap_free(entry); + error = 1; /* not an error, but entry was found */ + } + if (ptr) + shmem_swp_unmap(ptr); + spin_unlock(&info->lock); + radix_tree_preload_end(); +out: + unlock_page(page); + page_cache_release(page); + iput(inode); /* allows for NULL */ + return error; +} + +/* + * shmem_unuse() search for an eventually swapped out shmem page. + */ +int shmem_unuse(swp_entry_t entry, struct page *page) +{ + struct list_head *p, *next; + struct shmem_inode_info *info; + int found = 0; + + mutex_lock(&shmem_swaplist_mutex); + list_for_each_safe(p, next, &shmem_swaplist) { + info = list_entry(p, struct shmem_inode_info, swaplist); + found = shmem_unuse_inode(info, entry, page); + cond_resched(); + if (found) + goto out; + } + mutex_unlock(&shmem_swaplist_mutex); +out: return found; /* 0 or 1 or -ENOMEM */ +} + +/* + * Move the page from the page cache to the swap cache. + */ +static int shmem_writepage(struct page *page, struct writeback_control *wbc) +{ + struct shmem_inode_info *info; + swp_entry_t *entry, swap; + struct address_space *mapping; + unsigned long index; + struct inode *inode; + + BUG_ON(!PageLocked(page)); + mapping = page->mapping; + index = page->index; + inode = mapping->host; + info = SHMEM_I(inode); + if (info->flags & VM_LOCKED) + goto redirty; + if (!total_swap_pages) + goto redirty; + + /* + * shmem_backing_dev_info's capabilities prevent regular writeback or + * sync from ever calling shmem_writepage; but a stacking filesystem + * may use the ->writepage of its underlying filesystem, in which case + * tmpfs should write out to swap only in response to memory pressure, + * and not for the writeback threads or sync. However, in those cases, + * we do still want to check if there's a redundant swappage to be + * discarded. + */ + if (wbc->for_reclaim) + swap = get_swap_page(); + else + swap.val = 0; + + spin_lock(&info->lock); + if (index >= info->next_index) { + BUG_ON(!(info->flags & SHMEM_TRUNCATE)); + goto unlock; + } + entry = shmem_swp_entry(info, index, NULL); + if (entry->val) { + /* + * The more uptodate page coming down from a stacked + * writepage should replace our old swappage. 
+ */ + free_swap_and_cache(*entry); + shmem_swp_set(info, entry, 0); + } + shmem_recalc_inode(inode); + + if (swap.val && add_to_swap_cache(page, swap, GFP_ATOMIC) == 0) { + remove_from_page_cache(page); + shmem_swp_set(info, entry, swap.val); + shmem_swp_unmap(entry); + if (list_empty(&info->swaplist)) + inode = igrab(inode); + else + inode = NULL; + spin_unlock(&info->lock); + swap_duplicate(swap); + BUG_ON(page_mapped(page)); + page_cache_release(page); /* pagecache ref */ + swap_writepage(page, wbc); + if (inode) { + mutex_lock(&shmem_swaplist_mutex); + /* move instead of add in case we're racing */ + list_move_tail(&info->swaplist, &shmem_swaplist); + mutex_unlock(&shmem_swaplist_mutex); + iput(inode); + } + return 0; + } + + shmem_swp_unmap(entry); +unlock: + spin_unlock(&info->lock); + /* + * add_to_swap_cache() doesn't return -EEXIST, so we can safely + * clear SWAP_HAS_CACHE flag. + */ + swapcache_free(swap, NULL); +redirty: + set_page_dirty(page); + if (wbc->for_reclaim) + return AOP_WRITEPAGE_ACTIVATE; /* Return with page locked */ + unlock_page(page); + return 0; +} + +#ifdef CONFIG_NUMA +#ifdef CONFIG_TMPFS +static void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol) +{ + char buffer[64]; + + if (!mpol || mpol->mode == MPOL_DEFAULT) + return; /* show nothing */ + + mpol_to_str(buffer, sizeof(buffer), mpol, 1); + + seq_printf(seq, ",mpol=%s", buffer); +} + +static struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo) +{ + struct mempolicy *mpol = NULL; + if (sbinfo->mpol) { + spin_lock(&sbinfo->stat_lock); /* prevent replace/use races */ + mpol = sbinfo->mpol; + mpol_get(mpol); + spin_unlock(&sbinfo->stat_lock); + } + return mpol; +} +#endif /* CONFIG_TMPFS */ + +static struct page *shmem_swapin(swp_entry_t entry, gfp_t gfp, + struct shmem_inode_info *info, unsigned long idx) +{ + struct mempolicy mpol, *spol; + struct vm_area_struct pvma; + struct page *page; + + spol = mpol_cond_copy(&mpol, + mpol_shared_policy_lookup(&info->policy, idx)); + + /* Create a pseudo vma that just contains the policy */ + pvma.vm_start = 0; + pvma.vm_pgoff = idx; + pvma.vm_ops = NULL; + pvma.vm_policy = spol; + page = swapin_readahead(entry, gfp, &pvma, 0); + return page; +} + +static struct page *shmem_alloc_page(gfp_t gfp, + struct shmem_inode_info *info, unsigned long idx) +{ + struct vm_area_struct pvma; + + /* Create a pseudo vma that just contains the policy */ + pvma.vm_start = 0; + pvma.vm_pgoff = idx; + pvma.vm_ops = NULL; + pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, idx); + + /* + * alloc_page_vma() will drop the shared policy reference + */ + return alloc_page_vma(gfp, &pvma, 0); +} +#else /* !CONFIG_NUMA */ +#ifdef CONFIG_TMPFS +static inline void shmem_show_mpol(struct seq_file *seq, struct mempolicy *p) +{ +} +#endif /* CONFIG_TMPFS */ + +static inline struct page *shmem_swapin(swp_entry_t entry, gfp_t gfp, + struct shmem_inode_info *info, unsigned long idx) +{ + return swapin_readahead(entry, gfp, NULL, 0); +} + +static inline struct page *shmem_alloc_page(gfp_t gfp, + struct shmem_inode_info *info, unsigned long idx) +{ + return alloc_page(gfp); +} +#endif /* CONFIG_NUMA */ + +#if !defined(CONFIG_NUMA) || !defined(CONFIG_TMPFS) +static inline struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo) +{ + return NULL; +} +#endif + +/* + * shmem_getpage - either get the page from swap or allocate a new one + * + * If we allocate a new one we do not mark it dirty. That's up to the + * vm. 
If we swap it in we mark it dirty since we also free the swap + * entry since a page cannot live in both the swap and page cache + */ +static int shmem_getpage(struct inode *inode, unsigned long idx, + struct page **pagep, enum sgp_type sgp, int *type) +{ + struct address_space *mapping = inode->i_mapping; + struct shmem_inode_info *info = SHMEM_I(inode); + struct shmem_sb_info *sbinfo; + struct page *filepage = *pagep; + struct page *swappage; + swp_entry_t *entry; + swp_entry_t swap; + gfp_t gfp; + int error; + + if (idx >= SHMEM_MAX_INDEX) + return -EFBIG; + + if (type) + *type = 0; + + /* + * Normally, filepage is NULL on entry, and either found + * uptodate immediately, or allocated and zeroed, or read + * in under swappage, which is then assigned to filepage. + * But shmem_readpage (required for splice) passes in a locked + * filepage, which may be found not uptodate by other callers + * too, and may need to be copied from the swappage read in. + */ +repeat: + if (!filepage) + filepage = find_lock_page(mapping, idx); + if (filepage && PageUptodate(filepage)) + goto done; + error = 0; + gfp = mapping_gfp_mask(mapping); + if (!filepage) { + /* + * Try to preload while we can wait, to not make a habit of + * draining atomic reserves; but don't latch on to this cpu. + */ + error = radix_tree_preload(gfp & ~__GFP_HIGHMEM); + if (error) + goto failed; + radix_tree_preload_end(); + } + + spin_lock(&info->lock); + shmem_recalc_inode(inode); + entry = shmem_swp_alloc(info, idx, sgp); + if (IS_ERR(entry)) { + spin_unlock(&info->lock); + error = PTR_ERR(entry); + goto failed; + } + swap = *entry; + + if (swap.val) { + /* Look it up and read it in.. */ + swappage = lookup_swap_cache(swap); + if (!swappage) { + shmem_swp_unmap(entry); + /* here we actually do the io */ + if (type && !(*type & VM_FAULT_MAJOR)) { + __count_vm_event(PGMAJFAULT); + *type |= VM_FAULT_MAJOR; + } + spin_unlock(&info->lock); + swappage = shmem_swapin(swap, gfp, info, idx); + if (!swappage) { + spin_lock(&info->lock); + entry = shmem_swp_alloc(info, idx, sgp); + if (IS_ERR(entry)) + error = PTR_ERR(entry); + else { + if (entry->val == swap.val) + error = -ENOMEM; + shmem_swp_unmap(entry); + } + spin_unlock(&info->lock); + if (error) + goto failed; + goto repeat; + } + wait_on_page_locked(swappage); + page_cache_release(swappage); + goto repeat; + } + + /* We have to do this with page locked to prevent races */ + if (!trylock_page(swappage)) { + shmem_swp_unmap(entry); + spin_unlock(&info->lock); + wait_on_page_locked(swappage); + page_cache_release(swappage); + goto repeat; + } + if (PageWriteback(swappage)) { + shmem_swp_unmap(entry); + spin_unlock(&info->lock); + wait_on_page_writeback(swappage); + unlock_page(swappage); + page_cache_release(swappage); + goto repeat; + } + if (!PageUptodate(swappage)) { + shmem_swp_unmap(entry); + spin_unlock(&info->lock); + unlock_page(swappage); + page_cache_release(swappage); + error = -EIO; + goto failed; + } + + if (filepage) { + shmem_swp_set(info, entry, 0); + shmem_swp_unmap(entry); + delete_from_swap_cache(swappage); + spin_unlock(&info->lock); + copy_highpage(filepage, swappage); + unlock_page(swappage); + page_cache_release(swappage); + flush_dcache_page(filepage); + SetPageUptodate(filepage); + set_page_dirty(filepage); + swap_free(swap); + } else if (!(error = add_to_page_cache_locked(swappage, mapping, + idx, GFP_NOWAIT))) { + info->flags |= SHMEM_PAGEIN; + shmem_swp_set(info, entry, 0); + shmem_swp_unmap(entry); + delete_from_swap_cache(swappage); + 
spin_unlock(&info->lock); + filepage = swappage; + set_page_dirty(filepage); + swap_free(swap); + } else { + shmem_swp_unmap(entry); + spin_unlock(&info->lock); + if (error == -ENOMEM) { + /* + * reclaim from proper memory cgroup and + * call memcg's OOM if needed. + */ + error = mem_cgroup_shmem_charge_fallback( + swappage, + current->mm, + gfp); + if (error) { + unlock_page(swappage); + page_cache_release(swappage); + goto failed; + } + } + unlock_page(swappage); + page_cache_release(swappage); + goto repeat; + } + } else if (sgp == SGP_READ && !filepage) { + shmem_swp_unmap(entry); + filepage = find_get_page(mapping, idx); + if (filepage && + (!PageUptodate(filepage) || !trylock_page(filepage))) { + spin_unlock(&info->lock); + wait_on_page_locked(filepage); + page_cache_release(filepage); + filepage = NULL; + goto repeat; + } + spin_unlock(&info->lock); + } else { + shmem_swp_unmap(entry); + sbinfo = SHMEM_SB(inode->i_sb); + if (sbinfo->max_blocks) { + spin_lock(&sbinfo->stat_lock); + if (sbinfo->free_blocks == 0 || + shmem_acct_block(info->flags)) { + spin_unlock(&sbinfo->stat_lock); + spin_unlock(&info->lock); + error = -ENOSPC; + goto failed; + } + sbinfo->free_blocks--; + inode->i_blocks += BLOCKS_PER_PAGE; + spin_unlock(&sbinfo->stat_lock); + } else if (shmem_acct_block(info->flags)) { + spin_unlock(&info->lock); + error = -ENOSPC; + goto failed; + } + + if (!filepage) { + int ret; + + spin_unlock(&info->lock); + filepage = shmem_alloc_page(gfp, info, idx); + if (!filepage) { + shmem_unacct_blocks(info->flags, 1); + shmem_free_blocks(inode, 1); + error = -ENOMEM; + goto failed; + } + SetPageSwapBacked(filepage); + + /* Precharge page while we can wait, compensate after */ + error = mem_cgroup_cache_charge(filepage, current->mm, + GFP_KERNEL); + if (error) { + page_cache_release(filepage); + shmem_unacct_blocks(info->flags, 1); + shmem_free_blocks(inode, 1); + filepage = NULL; + goto failed; + } + + spin_lock(&info->lock); + entry = shmem_swp_alloc(info, idx, sgp); + if (IS_ERR(entry)) + error = PTR_ERR(entry); + else { + swap = *entry; + shmem_swp_unmap(entry); + } + ret = error || swap.val; + if (ret) + mem_cgroup_uncharge_cache_page(filepage); + else + ret = add_to_page_cache_lru(filepage, mapping, + idx, GFP_NOWAIT); + /* + * At add_to_page_cache_lru() failure, uncharge will + * be done automatically. + */ + if (ret) { + spin_unlock(&info->lock); + page_cache_release(filepage); + shmem_unacct_blocks(info->flags, 1); + shmem_free_blocks(inode, 1); + filepage = NULL; + if (error) + goto failed; + goto repeat; + } + info->flags |= SHMEM_PAGEIN; + } + + info->alloced++; + spin_unlock(&info->lock); + clear_highpage(filepage); + flush_dcache_page(filepage); + SetPageUptodate(filepage); + if (sgp == SGP_DIRTY) + set_page_dirty(filepage); + } +done: + *pagep = filepage; + return 0; + +failed: + if (*pagep != filepage) { + unlock_page(filepage); + page_cache_release(filepage); + } + return error; +} + +static int shmem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) +{ + struct inode *inode = vma->vm_file->f_path.dentry->d_inode; + int error; + int ret; + + if (((loff_t)vmf->pgoff << PAGE_CACHE_SHIFT) >= i_size_read(inode)) + return VM_FAULT_SIGBUS; + + error = shmem_getpage(inode, vmf->pgoff, &vmf->page, SGP_CACHE, &ret); + if (error) + return ((error == -ENOMEM) ? 
VM_FAULT_OOM : VM_FAULT_SIGBUS); + + return ret | VM_FAULT_LOCKED; +} + +#ifdef CONFIG_NUMA +static int shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *new) +{ + struct inode *i = vma->vm_file->f_path.dentry->d_inode; + return mpol_set_shared_policy(&SHMEM_I(i)->policy, vma, new); +} + +static struct mempolicy *shmem_get_policy(struct vm_area_struct *vma, + unsigned long addr) +{ + struct inode *i = vma->vm_file->f_path.dentry->d_inode; + unsigned long idx; + + idx = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff; + return mpol_shared_policy_lookup(&SHMEM_I(i)->policy, idx); +} +#endif + +int shmem_lock(struct file *file, int lock, struct user_struct *user) +{ + struct inode *inode = file->f_path.dentry->d_inode; + struct shmem_inode_info *info = SHMEM_I(inode); + int retval = -ENOMEM; + + spin_lock(&info->lock); + if (lock && !(info->flags & VM_LOCKED)) { + if (!user_shm_lock(inode->i_size, user)) + goto out_nomem; + info->flags |= VM_LOCKED; + mapping_set_unevictable(file->f_mapping); + } + if (!lock && (info->flags & VM_LOCKED) && user) { + user_shm_unlock(inode->i_size, user); + info->flags &= ~VM_LOCKED; + mapping_clear_unevictable(file->f_mapping); + scan_mapping_unevictable_pages(file->f_mapping); + } + retval = 0; + +out_nomem: + spin_unlock(&info->lock); + return retval; +} + +static int shmem_mmap(struct file *file, struct vm_area_struct *vma) +{ + file_accessed(file); + vma->vm_ops = &shmem_vm_ops; + vma->vm_flags |= VM_CAN_NONLINEAR; + return 0; +} + +static struct inode *shmem_get_inode(struct super_block *sb, int mode, + dev_t dev, unsigned long flags) +{ + struct inode *inode; + struct shmem_inode_info *info; + struct shmem_sb_info *sbinfo = SHMEM_SB(sb); + + if (shmem_reserve_inode(sb)) + return NULL; + + inode = new_inode(sb); + if (inode) { + inode->i_mode = mode; + inode->i_uid = current_fsuid(); + inode->i_gid = current_fsgid(); + inode->i_blocks = 0; + inode->i_mapping->backing_dev_info = &shmem_backing_dev_info; + inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME; + inode->i_generation = get_seconds(); + info = SHMEM_I(inode); + memset(info, 0, (char *)inode - (char *)info); + spin_lock_init(&info->lock); + info->flags = flags & VM_NORESERVE; + if (flags & VM_ATOMIC_COPY) + inode->i_flags |= S_ATOMIC_COPY; + INIT_LIST_HEAD(&info->swaplist); + cache_no_acl(inode); + + switch (mode & S_IFMT) { + default: + inode->i_op = &shmem_special_inode_operations; + init_special_inode(inode, mode, dev); + break; + case S_IFREG: + inode->i_mapping->a_ops = &shmem_aops; + inode->i_op = &shmem_inode_operations; + inode->i_fop = &shmem_file_operations; + mpol_shared_policy_init(&info->policy, + shmem_get_sbmpol(sbinfo)); + break; + case S_IFDIR: + inc_nlink(inode); + /* Some things misbehave if size == 0 on a directory */ + inode->i_size = 2 * BOGO_DIRENT_SIZE; + inode->i_op = &shmem_dir_inode_operations; + inode->i_fop = &simple_dir_operations; + break; + case S_IFLNK: + /* + * Must not load anything in the rbtree, + * mpol_free_shared_policy will not be called. 
+ */ + mpol_shared_policy_init(&info->policy, NULL); + break; + } + } else + shmem_free_inode(sb); + return inode; +} + +#ifdef CONFIG_TMPFS +static const struct inode_operations shmem_symlink_inode_operations; +static const struct inode_operations shmem_symlink_inline_operations; + +/* + * Normally tmpfs avoids the use of shmem_readpage and shmem_write_begin; + * but providing them allows a tmpfs file to be used for splice, sendfile, and + * below the loop driver, in the generic fashion that many filesystems support. + */ +static int shmem_readpage(struct file *file, struct page *page) +{ + struct inode *inode = page->mapping->host; + int error = shmem_getpage(inode, page->index, &page, SGP_CACHE, NULL); + unlock_page(page); + return error; +} + +static int +shmem_write_begin(struct file *file, struct address_space *mapping, + loff_t pos, unsigned len, unsigned flags, + struct page **pagep, void **fsdata) +{ + struct inode *inode = mapping->host; + pgoff_t index = pos >> PAGE_CACHE_SHIFT; + *pagep = NULL; + return shmem_getpage(inode, index, pagep, SGP_WRITE, NULL); +} + +static int +shmem_write_end(struct file *file, struct address_space *mapping, + loff_t pos, unsigned len, unsigned copied, + struct page *page, void *fsdata) +{ + struct inode *inode = mapping->host; + + if (pos + copied > inode->i_size) + i_size_write(inode, pos + copied); + + set_page_dirty(page); + unlock_page(page); + page_cache_release(page); + + return copied; +} + +static void do_shmem_file_read(struct file *filp, loff_t *ppos, read_descriptor_t *desc, read_actor_t actor) +{ + struct inode *inode = filp->f_path.dentry->d_inode; + struct address_space *mapping = inode->i_mapping; + unsigned long index, offset; + enum sgp_type sgp = SGP_READ; + + /* + * Might this read be for a stacking filesystem? Then when reading + * holes of a sparse file, we actually need to allocate those pages, + * and even mark them dirty, so it cannot exceed the max_blocks limit. + */ + if (segment_eq(get_fs(), KERNEL_DS)) + sgp = SGP_DIRTY; + + index = *ppos >> PAGE_CACHE_SHIFT; + offset = *ppos & ~PAGE_CACHE_MASK; + + for (;;) { + struct page *page = NULL; + unsigned long end_index, nr, ret; + loff_t i_size = i_size_read(inode); + + end_index = i_size >> PAGE_CACHE_SHIFT; + if (index > end_index) + break; + if (index == end_index) { + nr = i_size & ~PAGE_CACHE_MASK; + if (nr <= offset) + break; + } + + desc->error = shmem_getpage(inode, index, &page, sgp, NULL); + if (desc->error) { + if (desc->error == -EINVAL) + desc->error = 0; + break; + } + if (page) + unlock_page(page); + + /* + * We must evaluate after, since reads (unlike writes) + * are called without i_mutex protection against truncate + */ + nr = PAGE_CACHE_SIZE; + i_size = i_size_read(inode); + end_index = i_size >> PAGE_CACHE_SHIFT; + if (index == end_index) { + nr = i_size & ~PAGE_CACHE_MASK; + if (nr <= offset) { + if (page) + page_cache_release(page); + break; + } + } + nr -= offset; + + if (page) { + /* + * If users can be writing to this page using arbitrary + * virtual addresses, take care about potential aliasing + * before reading the page on the kernel side. + */ + if (mapping_writably_mapped(mapping)) + flush_dcache_page(page); + /* + * Mark the page accessed if we read the beginning. + */ + if (!offset) + mark_page_accessed(page); + } else { + page = ZERO_PAGE(0); + page_cache_get(page); + } + + /* + * Ok, we have the page, and it's up-to-date, so + * now we can copy it to user space... + * + * The actor routine returns how many bytes were actually used.. + * NOTE! 
This may not be the same as how much of a user buffer + * we filled up (we may be padding etc), so we can only update + * "pos" here (the actor routine has to update the user buffer + * pointers and the remaining count). + */ + ret = actor(desc, page, offset, nr); + offset += ret; + index += offset >> PAGE_CACHE_SHIFT; + offset &= ~PAGE_CACHE_MASK; + + page_cache_release(page); + if (ret != nr || !desc->count) + break; + + cond_resched(); + } + + *ppos = ((loff_t) index << PAGE_CACHE_SHIFT) + offset; + file_accessed(filp); +} + +static ssize_t shmem_file_aio_read(struct kiocb *iocb, + const struct iovec *iov, unsigned long nr_segs, loff_t pos) +{ + struct file *filp = iocb->ki_filp; + ssize_t retval; + unsigned long seg; + size_t count; + loff_t *ppos = &iocb->ki_pos; + + retval = generic_segment_checks(iov, &nr_segs, &count, VERIFY_WRITE); + if (retval) + return retval; + + for (seg = 0; seg < nr_segs; seg++) { + read_descriptor_t desc; + + desc.written = 0; + desc.arg.buf = iov[seg].iov_base; + desc.count = iov[seg].iov_len; + if (desc.count == 0) + continue; + desc.error = 0; + do_shmem_file_read(filp, ppos, &desc, file_read_actor); + retval += desc.written; + if (desc.error) { + retval = retval ?: desc.error; + break; + } + if (desc.count > 0) + break; + } + return retval; +} + +static int shmem_statfs(struct dentry *dentry, struct kstatfs *buf) +{ + struct shmem_sb_info *sbinfo = SHMEM_SB(dentry->d_sb); + + buf->f_type = TMPFS_MAGIC; + buf->f_bsize = PAGE_CACHE_SIZE; + buf->f_namelen = NAME_MAX; + spin_lock(&sbinfo->stat_lock); + if (sbinfo->max_blocks) { + buf->f_blocks = sbinfo->max_blocks; + buf->f_bavail = buf->f_bfree = sbinfo->free_blocks; + } + if (sbinfo->max_inodes) { + buf->f_files = sbinfo->max_inodes; + buf->f_ffree = sbinfo->free_inodes; + } + /* else leave those fields 0 like simple_statfs */ + spin_unlock(&sbinfo->stat_lock); + return 0; +} + +/* + * File creation. Allocate an inode, and we're done.. + */ +static int +shmem_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev) +{ + struct inode *inode; + int error = -ENOSPC; + + inode = shmem_get_inode(dir->i_sb, mode, dev, VM_NORESERVE); + if (inode) { + error = security_inode_init_security(inode, dir, NULL, NULL, + NULL); + if (error) { + if (error != -EOPNOTSUPP) { + iput(inode); + return error; + } + } + error = shmem_acl_init(inode, dir); + if (error) { + iput(inode); + return error; + } + if (dir->i_mode & S_ISGID) { + inode->i_gid = dir->i_gid; + if (S_ISDIR(mode)) + inode->i_mode |= S_ISGID; + } + dir->i_size += BOGO_DIRENT_SIZE; + dir->i_ctime = dir->i_mtime = CURRENT_TIME; + d_instantiate(dentry, inode); + dget(dentry); /* Extra count - pin the dentry in core */ + } + return error; +} + +static int shmem_mkdir(struct inode *dir, struct dentry *dentry, int mode) +{ + int error; + + if ((error = shmem_mknod(dir, dentry, mode | S_IFDIR, 0))) + return error; + inc_nlink(dir); + return 0; +} + +static int shmem_create(struct inode *dir, struct dentry *dentry, int mode, + struct nameidata *nd) +{ + return shmem_mknod(dir, dentry, mode | S_IFREG, 0); +} + +/* + * Link a file.. + */ +static int shmem_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry) +{ + struct inode *inode = old_dentry->d_inode; + int ret; + + /* + * No ordinary (disk based) filesystem counts links as inodes; + * but each new link needs a new dentry, pinning lowmem, and + * tmpfs dentries cannot be pruned until they are unlinked. 
+ */ + ret = shmem_reserve_inode(inode->i_sb); + if (ret) + goto out; + + dir->i_size += BOGO_DIRENT_SIZE; + inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME; + inc_nlink(inode); + atomic_inc(&inode->i_count); /* New dentry reference */ + dget(dentry); /* Extra pinning count for the created dentry */ + d_instantiate(dentry, inode); +out: + return ret; +} + +static int shmem_unlink(struct inode *dir, struct dentry *dentry) +{ + struct inode *inode = dentry->d_inode; + + if (inode->i_nlink > 1 && !S_ISDIR(inode->i_mode)) + shmem_free_inode(inode->i_sb); + + dir->i_size -= BOGO_DIRENT_SIZE; + inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME; + drop_nlink(inode); + dput(dentry); /* Undo the count from "create" - this does all the work */ + return 0; +} + +static int shmem_rmdir(struct inode *dir, struct dentry *dentry) +{ + if (!simple_empty(dentry)) + return -ENOTEMPTY; + + drop_nlink(dentry->d_inode); + drop_nlink(dir); + return shmem_unlink(dir, dentry); +} + +/* + * The VFS layer already does all the dentry stuff for rename, + * we just have to decrement the usage count for the target if + * it exists so that the VFS layer correctly free's it when it + * gets overwritten. + */ +static int shmem_rename(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry) +{ + struct inode *inode = old_dentry->d_inode; + int they_are_dirs = S_ISDIR(inode->i_mode); + + if (!simple_empty(new_dentry)) + return -ENOTEMPTY; + + if (new_dentry->d_inode) { + (void) shmem_unlink(new_dir, new_dentry); + if (they_are_dirs) + drop_nlink(old_dir); + } else if (they_are_dirs) { + drop_nlink(old_dir); + inc_nlink(new_dir); + } + + old_dir->i_size -= BOGO_DIRENT_SIZE; + new_dir->i_size += BOGO_DIRENT_SIZE; + old_dir->i_ctime = old_dir->i_mtime = + new_dir->i_ctime = new_dir->i_mtime = + inode->i_ctime = CURRENT_TIME; + return 0; +} + +static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *symname) +{ + int error; + int len; + struct inode *inode; + struct page *page = NULL; + char *kaddr; + struct shmem_inode_info *info; + + len = strlen(symname) + 1; + if (len > PAGE_CACHE_SIZE) + return -ENAMETOOLONG; + + inode = shmem_get_inode(dir->i_sb, S_IFLNK|S_IRWXUGO, 0, VM_NORESERVE); + if (!inode) + return -ENOSPC; + + error = security_inode_init_security(inode, dir, NULL, NULL, + NULL); + if (error) { + if (error != -EOPNOTSUPP) { + iput(inode); + return error; + } + error = 0; + } + + info = SHMEM_I(inode); + inode->i_size = len-1; + if (len <= (char *)inode - (char *)info) { + /* do it inline */ + memcpy(info, symname, len); + inode->i_op = &shmem_symlink_inline_operations; + } else { + error = shmem_getpage(inode, 0, &page, SGP_WRITE, NULL); + if (error) { + iput(inode); + return error; + } + inode->i_mapping->a_ops = &shmem_aops; + inode->i_op = &shmem_symlink_inode_operations; + kaddr = kmap_atomic(page, KM_USER0); + memcpy(kaddr, symname, len); + kunmap_atomic(kaddr, KM_USER0); + set_page_dirty(page); + unlock_page(page); + page_cache_release(page); + } + if (dir->i_mode & S_ISGID) + inode->i_gid = dir->i_gid; + dir->i_size += BOGO_DIRENT_SIZE; + dir->i_ctime = dir->i_mtime = CURRENT_TIME; + d_instantiate(dentry, inode); + dget(dentry); + return 0; +} + +static void *shmem_follow_link_inline(struct dentry *dentry, struct nameidata *nd) +{ + nd_set_link(nd, (char *)SHMEM_I(dentry->d_inode)); + return NULL; +} + +static void *shmem_follow_link(struct dentry *dentry, struct nameidata *nd) +{ + struct page *page = NULL; + int res = 
shmem_getpage(dentry->d_inode, 0, &page, SGP_READ, NULL); + nd_set_link(nd, res ? ERR_PTR(res) : kmap(page)); + if (page) + unlock_page(page); + return page; +} + +static void shmem_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie) +{ + if (!IS_ERR(nd_get_link(nd))) { + struct page *page = cookie; + kunmap(page); + mark_page_accessed(page); + page_cache_release(page); + } +} + +static const struct inode_operations shmem_symlink_inline_operations = { + .readlink = generic_readlink, + .follow_link = shmem_follow_link_inline, +}; + +static const struct inode_operations shmem_symlink_inode_operations = { + .truncate = shmem_truncate, + .readlink = generic_readlink, + .follow_link = shmem_follow_link, + .put_link = shmem_put_link, +}; + +#ifdef CONFIG_TMPFS_POSIX_ACL +/* + * Superblocks without xattr inode operations will get security.* xattr + * support from the VFS "for free". As soon as we have any other xattrs + * like ACLs, we also need to implement the security.* handlers at + * filesystem level, though. + */ + +static size_t shmem_xattr_security_list(struct inode *inode, char *list, + size_t list_len, const char *name, + size_t name_len) +{ + return security_inode_listsecurity(inode, list, list_len); +} + +static int shmem_xattr_security_get(struct inode *inode, const char *name, + void *buffer, size_t size) +{ + if (strcmp(name, "") == 0) + return -EINVAL; + return xattr_getsecurity(inode, name, buffer, size); +} + +static int shmem_xattr_security_set(struct inode *inode, const char *name, + const void *value, size_t size, int flags) +{ + if (strcmp(name, "") == 0) + return -EINVAL; + return security_inode_setsecurity(inode, name, value, size, flags); +} + +static struct xattr_handler shmem_xattr_security_handler = { + .prefix = XATTR_SECURITY_PREFIX, + .list = shmem_xattr_security_list, + .get = shmem_xattr_security_get, + .set = shmem_xattr_security_set, +}; + +static struct xattr_handler *shmem_xattr_handlers[] = { + &shmem_xattr_acl_access_handler, + &shmem_xattr_acl_default_handler, + &shmem_xattr_security_handler, + NULL +}; +#endif + +static struct dentry *shmem_get_parent(struct dentry *child) +{ + return ERR_PTR(-ESTALE); +} + +static int shmem_match(struct inode *ino, void *vfh) +{ + __u32 *fh = vfh; + __u64 inum = fh[2]; + inum = (inum << 32) | fh[1]; + return ino->i_ino == inum && fh[0] == ino->i_generation; +} + +static struct dentry *shmem_fh_to_dentry(struct super_block *sb, + struct fid *fid, int fh_len, int fh_type) +{ + struct inode *inode; + struct dentry *dentry = NULL; + u64 inum = fid->raw[2]; + inum = (inum << 32) | fid->raw[1]; + + if (fh_len < 3) + return NULL; + + inode = ilookup5(sb, (unsigned long)(inum + fid->raw[0]), + shmem_match, fid->raw); + if (inode) { + dentry = d_find_alias(inode); + iput(inode); + } + + return dentry; +} + +static int shmem_encode_fh(struct dentry *dentry, __u32 *fh, int *len, + int connectable) +{ + struct inode *inode = dentry->d_inode; + + if (*len < 3) + return 255; + + if (hlist_unhashed(&inode->i_hash)) { + /* Unfortunately insert_inode_hash is not idempotent, + * so as we hash inodes here rather than at creation + * time, we need a lock to ensure we only try + * to do it once + */ + static DEFINE_SPINLOCK(lock); + spin_lock(&lock); + if (hlist_unhashed(&inode->i_hash)) + __insert_inode_hash(inode, + inode->i_ino + inode->i_generation); + spin_unlock(&lock); + } + + fh[0] = inode->i_generation; + fh[1] = inode->i_ino; + fh[2] = ((__u64)inode->i_ino) >> 32; + + *len = 3; + return 1; +} + +static const struct 
export_operations shmem_export_ops = { + .get_parent = shmem_get_parent, + .encode_fh = shmem_encode_fh, + .fh_to_dentry = shmem_fh_to_dentry, +}; + +static int shmem_parse_options(char *options, struct shmem_sb_info *sbinfo, + bool remount) +{ + char *this_char, *value, *rest; + + while (options != NULL) { + this_char = options; + for (;;) { + /* + * NUL-terminate this option: unfortunately, + * mount options form a comma-separated list, + * but mpol's nodelist may also contain commas. + */ + options = strchr(options, ','); + if (options == NULL) + break; + options++; + if (!isdigit(*options)) { + options[-1] = '\0'; + break; + } + } + if (!*this_char) + continue; + if ((value = strchr(this_char,'=')) != NULL) { + *value++ = 0; + } else { + printk(KERN_ERR + "tmpfs: No value for mount option '%s'\n", + this_char); + return 1; + } + + if (!strcmp(this_char,"size")) { + unsigned long long size; + size = memparse(value,&rest); + if (*rest == '%') { + size <<= PAGE_SHIFT; + size *= totalram_pages; + do_div(size, 100); + rest++; + } + if (*rest) + goto bad_val; + sbinfo->max_blocks = + DIV_ROUND_UP(size, PAGE_CACHE_SIZE); + } else if (!strcmp(this_char,"nr_blocks")) { + sbinfo->max_blocks = memparse(value, &rest); + if (*rest) + goto bad_val; + } else if (!strcmp(this_char,"nr_inodes")) { + sbinfo->max_inodes = memparse(value, &rest); + if (*rest) + goto bad_val; + } else if (!strcmp(this_char,"mode")) { + if (remount) + continue; + sbinfo->mode = simple_strtoul(value, &rest, 8) & 07777; + if (*rest) + goto bad_val; + } else if (!strcmp(this_char,"uid")) { + if (remount) + continue; + sbinfo->uid = simple_strtoul(value, &rest, 0); + if (*rest) + goto bad_val; + } else if (!strcmp(this_char,"gid")) { + if (remount) + continue; + sbinfo->gid = simple_strtoul(value, &rest, 0); + if (*rest) + goto bad_val; + } else if (!strcmp(this_char,"mpol")) { + if (mpol_parse_str(value, &sbinfo->mpol, 1)) + goto bad_val; + } else { + printk(KERN_ERR "tmpfs: Bad mount option %s\n", + this_char); + return 1; + } + } + return 0; + +bad_val: + printk(KERN_ERR "tmpfs: Bad value '%s' for mount option '%s'\n", + value, this_char); + return 1; + +} + +static int shmem_remount_fs(struct super_block *sb, int *flags, char *data) +{ + struct shmem_sb_info *sbinfo = SHMEM_SB(sb); + struct shmem_sb_info config = *sbinfo; + unsigned long blocks; + unsigned long inodes; + int error = -EINVAL; + + if (shmem_parse_options(data, &config, true)) + return error; + + spin_lock(&sbinfo->stat_lock); + blocks = sbinfo->max_blocks - sbinfo->free_blocks; + inodes = sbinfo->max_inodes - sbinfo->free_inodes; + if (config.max_blocks < blocks) + goto out; + if (config.max_inodes < inodes) + goto out; + /* + * Those tests also disallow limited->unlimited while any are in + * use, so i_blocks will always be zero when max_blocks is zero; + * but we must separately disallow unlimited->limited, because + * in that case we have no record of how much is already in use. 
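+ * Editorial example (not part of the patch): with 4 KiB pages, a
+ * tmpfs mounted with size=64m has max_blocks = 16384. Once 4096
+ * blocks are in use, "mount -o remount,size=32m" (8192 blocks)
+ * still passes the checks above, while "mount -o remount,size=8m"
+ * (2048 blocks) fails the config.max_blocks < blocks test and the
+ * remount returns -EINVAL.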
+ */ + if (config.max_blocks && !sbinfo->max_blocks) + goto out; + if (config.max_inodes && !sbinfo->max_inodes) + goto out; + + error = 0; + sbinfo->max_blocks = config.max_blocks; + sbinfo->free_blocks = config.max_blocks - blocks; + sbinfo->max_inodes = config.max_inodes; + sbinfo->free_inodes = config.max_inodes - inodes; + + mpol_put(sbinfo->mpol); + sbinfo->mpol = config.mpol; /* transfers initial ref */ +out: + spin_unlock(&sbinfo->stat_lock); + return error; +} + +static int shmem_show_options(struct seq_file *seq, struct vfsmount *vfs) +{ + struct shmem_sb_info *sbinfo = SHMEM_SB(vfs->mnt_sb); + + if (sbinfo->max_blocks != shmem_default_max_blocks()) + seq_printf(seq, ",size=%luk", + sbinfo->max_blocks << (PAGE_CACHE_SHIFT - 10)); + if (sbinfo->max_inodes != shmem_default_max_inodes()) + seq_printf(seq, ",nr_inodes=%lu", sbinfo->max_inodes); + if (sbinfo->mode != (S_IRWXUGO | S_ISVTX)) + seq_printf(seq, ",mode=%03o", sbinfo->mode); + if (sbinfo->uid != 0) + seq_printf(seq, ",uid=%u", sbinfo->uid); + if (sbinfo->gid != 0) + seq_printf(seq, ",gid=%u", sbinfo->gid); + shmem_show_mpol(seq, sbinfo->mpol); + return 0; +} +#endif /* CONFIG_TMPFS */ + +static void shmem_put_super(struct super_block *sb) +{ + kfree(sb->s_fs_info); + sb->s_fs_info = NULL; +} + +int shmem_fill_super(struct super_block *sb, void *data, int silent) +{ + struct inode *inode; + struct dentry *root; + struct shmem_sb_info *sbinfo; + int err = -ENOMEM; + + /* Round up to L1_CACHE_BYTES to resist false sharing */ + sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info), + L1_CACHE_BYTES), GFP_KERNEL); + if (!sbinfo) + return -ENOMEM; + + sbinfo->mode = S_IRWXUGO | S_ISVTX; + sbinfo->uid = current_fsuid(); + sbinfo->gid = current_fsgid(); + sb->s_fs_info = sbinfo; + +#ifdef CONFIG_TMPFS + /* + * Per default we only allow half of the physical ram per + * tmpfs instance, limiting inodes to one per page of lowmem; + * but the internal instance is left unlimited. 
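+ * For reference (editorial note, not part of the patch): the
+ * defaults come from shmem_default_max_blocks() and
+ * shmem_default_max_inodes() earlier in this file, which in 2.6.32
+ * evaluate to totalram_pages / 2 and
+ * min(totalram_pages - totalhigh_pages, totalram_pages / 2).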
+ */ + if (!(sb->s_flags & MS_NOUSER)) { + sbinfo->max_blocks = shmem_default_max_blocks(); + sbinfo->max_inodes = shmem_default_max_inodes(); + if (shmem_parse_options(data, sbinfo, false)) { + err = -EINVAL; + goto failed; + } + } + sb->s_export_op = &shmem_export_ops; +#else + sb->s_flags |= MS_NOUSER; +#endif + + spin_lock_init(&sbinfo->stat_lock); + sbinfo->free_blocks = sbinfo->max_blocks; + sbinfo->free_inodes = sbinfo->max_inodes; + + sb->s_maxbytes = SHMEM_MAX_BYTES; + sb->s_blocksize = PAGE_CACHE_SIZE; + sb->s_blocksize_bits = PAGE_CACHE_SHIFT; + sb->s_magic = TMPFS_MAGIC; + sb->s_op = &shmem_ops; + sb->s_time_gran = 1; +#ifdef CONFIG_TMPFS_POSIX_ACL + sb->s_xattr = shmem_xattr_handlers; + sb->s_flags |= MS_POSIXACL; +#endif + + inode = shmem_get_inode(sb, S_IFDIR | sbinfo->mode, 0, VM_NORESERVE); + if (!inode) + goto failed; + inode->i_uid = sbinfo->uid; + inode->i_gid = sbinfo->gid; + root = d_alloc_root(inode); + if (!root) + goto failed_iput; + sb->s_root = root; + return 0; + +failed_iput: + iput(inode); +failed: + shmem_put_super(sb); + return err; +} + +static struct kmem_cache *shmem_inode_cachep; + +static struct inode *shmem_alloc_inode(struct super_block *sb) +{ + struct shmem_inode_info *p; + p = (struct shmem_inode_info *)kmem_cache_alloc(shmem_inode_cachep, GFP_KERNEL); + if (!p) + return NULL; + return &p->vfs_inode; +} + +static void shmem_destroy_inode(struct inode *inode) +{ + if ((inode->i_mode & S_IFMT) == S_IFREG) { + /* only struct inode is valid if it's an inline symlink */ + mpol_free_shared_policy(&SHMEM_I(inode)->policy); + } + kmem_cache_free(shmem_inode_cachep, SHMEM_I(inode)); +} + +static void init_once(void *foo) +{ + struct shmem_inode_info *p = (struct shmem_inode_info *) foo; + + inode_init_once(&p->vfs_inode); +} + +static int init_inodecache(void) +{ + shmem_inode_cachep = kmem_cache_create("shmem_inode_cache", + sizeof(struct shmem_inode_info), + 0, SLAB_PANIC, init_once); + return 0; +} + +static void destroy_inodecache(void) +{ + kmem_cache_destroy(shmem_inode_cachep); +} + +static const struct address_space_operations shmem_aops = { + .writepage = shmem_writepage, + .set_page_dirty = __set_page_dirty_no_writeback, +#ifdef CONFIG_TMPFS + .readpage = shmem_readpage, + .write_begin = shmem_write_begin, + .write_end = shmem_write_end, +#endif + .migratepage = migrate_page, + .error_remove_page = generic_error_remove_page, +}; + +static const struct file_operations shmem_file_operations = { + .mmap = shmem_mmap, +#ifdef CONFIG_TMPFS + .llseek = generic_file_llseek, + .read = do_sync_read, + .write = do_sync_write, + .aio_read = shmem_file_aio_read, + .aio_write = generic_file_aio_write, + .fsync = simple_sync_file, + .splice_read = generic_file_splice_read, + .splice_write = generic_file_splice_write, +#endif +}; + +static const struct inode_operations shmem_inode_operations = { + .truncate = shmem_truncate, + .setattr = shmem_notify_change, + .truncate_range = shmem_truncate_range, +#ifdef CONFIG_TMPFS_POSIX_ACL + .setxattr = generic_setxattr, + .getxattr = generic_getxattr, + .listxattr = generic_listxattr, + .removexattr = generic_removexattr, + .check_acl = shmem_check_acl, +#endif + +}; + +static const struct inode_operations shmem_dir_inode_operations = { +#ifdef CONFIG_TMPFS + .create = shmem_create, + .lookup = simple_lookup, + .link = shmem_link, + .unlink = shmem_unlink, + .symlink = shmem_symlink, + .mkdir = shmem_mkdir, + .rmdir = shmem_rmdir, + .mknod = shmem_mknod, + .rename = shmem_rename, +#endif +#ifdef CONFIG_TMPFS_POSIX_ACL + 
.setattr = shmem_notify_change, + .setxattr = generic_setxattr, + .getxattr = generic_getxattr, + .listxattr = generic_listxattr, + .removexattr = generic_removexattr, + .check_acl = shmem_check_acl, +#endif +}; + +static const struct inode_operations shmem_special_inode_operations = { +#ifdef CONFIG_TMPFS_POSIX_ACL + .setattr = shmem_notify_change, + .setxattr = generic_setxattr, + .getxattr = generic_getxattr, + .listxattr = generic_listxattr, + .removexattr = generic_removexattr, + .check_acl = shmem_check_acl, +#endif +}; + +static const struct super_operations shmem_ops = { + .alloc_inode = shmem_alloc_inode, + .destroy_inode = shmem_destroy_inode, +#ifdef CONFIG_TMPFS + .statfs = shmem_statfs, + .remount_fs = shmem_remount_fs, + .show_options = shmem_show_options, +#endif + .delete_inode = shmem_delete_inode, + .drop_inode = generic_delete_inode, + .put_super = shmem_put_super, +}; + +static const struct vm_operations_struct shmem_vm_ops = { + .fault = shmem_fault, +#ifdef CONFIG_NUMA + .set_policy = shmem_set_policy, + .get_policy = shmem_get_policy, +#endif +}; + + +static int shmem_get_sb(struct file_system_type *fs_type, + int flags, const char *dev_name, void *data, struct vfsmount *mnt) +{ + return get_sb_nodev(fs_type, flags, data, shmem_fill_super, mnt); +} + +static struct file_system_type tmpfs_fs_type = { + .owner = THIS_MODULE, + .name = "tmpfs", + .get_sb = shmem_get_sb, + .kill_sb = kill_litter_super, +}; + +int __init init_tmpfs(void) +{ + int error; + + error = bdi_init(&shmem_backing_dev_info); + if (error) + goto out4; + + error = init_inodecache(); + if (error) + goto out3; + + error = register_filesystem(&tmpfs_fs_type); + if (error) { + printk(KERN_ERR "Could not register tmpfs\n"); + goto out2; + } + + shm_mnt = vfs_kern_mount(&tmpfs_fs_type, MS_NOUSER, + tmpfs_fs_type.name, NULL); + if (IS_ERR(shm_mnt)) { + error = PTR_ERR(shm_mnt); + printk(KERN_ERR "Could not kern_mount tmpfs\n"); + goto out1; + } + return 0; + +out1: + unregister_filesystem(&tmpfs_fs_type); +out2: + destroy_inodecache(); +out3: + bdi_destroy(&shmem_backing_dev_info); +out4: + shm_mnt = ERR_PTR(error); + return error; +} + +#else /* !CONFIG_SHMEM */ + +/* + * tiny-shmem: simple shmemfs and tmpfs using ramfs code + * + * This is intended for small system where the benefits of the full + * shmem code (swap-backed and resource-limited) are outweighed by + * their complexity. On systems without swap this code should be + * effectively equivalent, but much lighter weight. 
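+ * Editorial note (not part of the patch): in this configuration the
+ * #defines below alias the shmem entry points to their ramfs
+ * equivalents; shmem_acct_size() becomes a constant 0, so none of
+ * the size accounting above applies and tmpfs size limits are not
+ * enforced.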
+ */
+
+#include <linux/ramfs.h>
+
+static struct file_system_type tmpfs_fs_type = {
+	.name		= "tmpfs",
+	.get_sb		= ramfs_get_sb,
+	.kill_sb	= kill_litter_super,
+};
+
+int __init init_tmpfs(void)
+{
+	BUG_ON(register_filesystem(&tmpfs_fs_type) != 0);
+
+	shm_mnt = kern_mount(&tmpfs_fs_type);
+	BUG_ON(IS_ERR(shm_mnt));
+
+	return 0;
+}
+
+int shmem_unuse(swp_entry_t entry, struct page *page)
+{
+	return 0;
+}
+
+int shmem_lock(struct file *file, int lock, struct user_struct *user)
+{
+	return 0;
+}
+
+#define shmem_vm_ops				generic_file_vm_ops
+#define shmem_file_operations			ramfs_file_operations
+#define shmem_get_inode(sb, mode, dev, flags)	ramfs_get_inode(sb, mode, dev)
+#define shmem_acct_size(flags, size)		0
+#define shmem_unacct_size(flags, size)		do {} while (0)
+#define SHMEM_MAX_BYTES				MAX_LFS_FILESIZE
+
+#endif /* CONFIG_SHMEM */
+
+/* common code */
+
+/**
+ * shmem_file_setup - get an unlinked file living in tmpfs
+ * @name: name for dentry (to be seen in /proc/<pid>/maps
+ * @size: size to be set for the file
+ * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
+ */
+struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags)
+{
+	int error;
+	struct file *file;
+	struct inode *inode;
+	struct dentry *dentry, *root;
+	struct qstr this;
+
+	if (IS_ERR(shm_mnt))
+		return (void *)shm_mnt;
+
+	if (size < 0 || size > SHMEM_MAX_BYTES)
+		return ERR_PTR(-EINVAL);
+
+	if (shmem_acct_size(flags, size))
+		return ERR_PTR(-ENOMEM);
+
+	error = -ENOMEM;
+	this.name = name;
+	this.len = strlen(name);
+	this.hash = 0; /* will go */
+	root = shm_mnt->mnt_root;
+	dentry = d_alloc(root, &this);
+	if (!dentry)
+		goto put_memory;
+
+	error = -ENFILE;
+	file = get_empty_filp();
+	if (!file)
+		goto put_dentry;
+
+	error = -ENOSPC;
+	inode = shmem_get_inode(root->d_sb, S_IFREG | S_IRWXUGO, 0, flags);
+	if (!inode)
+		goto close_file;
+
+	d_instantiate(dentry, inode);
+	inode->i_size = size;
+	inode->i_nlink = 0;	/* It is unlinked */
+	init_file(file, shm_mnt, dentry, FMODE_WRITE | FMODE_READ,
+		  &shmem_file_operations);
+
+#ifndef CONFIG_MMU
+	error = ramfs_nommu_expand_for_mapping(inode, size);
+	if (error)
+		goto close_file;
+#endif
+	ima_counts_get(file);
+	return file;
+
+close_file:
+	put_filp(file);
+put_dentry:
+	dput(dentry);
+put_memory:
+	shmem_unacct_size(flags, size);
+	return ERR_PTR(error);
+}
+EXPORT_SYMBOL_GPL(shmem_file_setup);
+
+/**
+ * shmem_zero_setup - setup a shared anonymous mapping
+ * @vma: the vma to be mmapped is prepared by do_mmap_pgoff
+ */
+int shmem_zero_setup(struct vm_area_struct *vma)
+{
+	struct file *file;
+	loff_t size = vma->vm_end - vma->vm_start;
+
+	file = shmem_file_setup("dev/zero", size, vma->vm_flags);
+	if (IS_ERR(file))
+		return PTR_ERR(file);
+
+	if (vma->vm_file)
+		fput(vma->vm_file);
+	vma->vm_file = file;
+	vma->vm_ops = &shmem_vm_ops;
+	vma->vm_flags |= VM_CAN_NONLINEAR;
+	return 0;
+}
diff -Nur kernel-2.6.32.54/linux-2.6.32/mm/slab.c kernel-2.6.32.54.vs/linux-2.6.32/mm/slab.c
--- kernel-2.6.32.54/linux-2.6.32/mm/slab.c	2012-01-16 15:01:39.984725179 +0100
+++ kernel-2.6.32.54.vs/linux-2.6.32/mm/slab.c	2012-01-16 14:51:22.081408127 +0100
@@ -431,6 +431,8 @@
 #define STATS_INC_FREEMISS(x)	do { } while (0)
 #endif
 
+#include "slab_vs.h"
+
 #if DEBUG
 
 /*
@@ -3251,6 +3253,7 @@
 	obj = slab_get_obj(cachep, slabp, nodeid);
 	check_slabp(cachep, slabp);
+	vx_slab_alloc(cachep, flags);
 	l3->free_objects--;
 	/* move slabp to correct slabp list: */
 	list_del(&slabp->list);
@@ -3327,6 +3330,7 @@
 	/* ___cache_alloc_node can fall back to other nodes */
 	ptr = ____cache_alloc_node(cachep, flags, nodeid);
   out:
+	vx_slab_alloc(cachep, flags);
 	local_irq_restore(save_flags);
 	ptr = cache_alloc_debugcheck_after(cachep, flags, ptr, caller);
 	kmemleak_alloc_recursive(ptr, obj_size(cachep), 1, cachep->flags,
@@ -3513,6 +3517,7 @@
 	check_irq_off();
 	kmemleak_free_recursive(objp, cachep->flags);
 	objp = cache_free_debugcheck(cachep, objp, __builtin_return_address(0));
+	vx_slab_free(cachep);
 	kmemcheck_slab_free(cachep, objp, obj_size(cachep));
diff -Nur kernel-2.6.32.54/linux-2.6.32/mm/slab_vs.h kernel-2.6.32.54.vs/linux-2.6.32/mm/slab_vs.h
--- kernel-2.6.32.54/linux-2.6.32/mm/slab_vs.h	1970-01-01 01:00:00.000000000 +0100
+++ kernel-2.6.32.54.vs/linux-2.6.32/mm/slab_vs.h	2012-01-16 14:51:22.081408127 +0100
@@ -0,0 +1,29 @@
+
+#include
+
+#include
+
+static inline
+void vx_slab_alloc(struct kmem_cache *cachep, gfp_t flags)
+{
+	int what = gfp_zone(cachep->gfpflags);
+	struct vx_info *vxi = current_vx_info();
+
+	if (!vxi)
+		return;
+
+	atomic_add(cachep->buffer_size, &vxi->cacct.slab[what]);
+}
+
+static inline
+void vx_slab_free(struct kmem_cache *cachep)
+{
+	int what = gfp_zone(cachep->gfpflags);
+	struct vx_info *vxi = current_vx_info();
+
+	if (!vxi)
+		return;
+
+	atomic_sub(cachep->buffer_size, &vxi->cacct.slab[what]);
+}
+
diff -Nur kernel-2.6.32.54/linux-2.6.32/mm/swapfile.c kernel-2.6.32.54.vs/linux-2.6.32/mm/swapfile.c
--- kernel-2.6.32.54/linux-2.6.32/mm/swapfile.c	2012-01-16 15:01:40.312724018 +0100
+++ kernel-2.6.32.54.vs/linux-2.6.32/mm/swapfile.c	2012-01-16 14:51:22.085408113 +0100
@@ -34,6 +34,8 @@
 #include
 #include
 #include
+#include
+#include
 
 static DEFINE_SPINLOCK(swap_lock);
 static unsigned int nr_swapfiles;
@@ -1687,6 +1689,8 @@
 	if (v == SEQ_START_TOKEN)
 		ptr = swap_info;
 	else {
+		if (vx_flags(VXF_VIRT_MEM, 0))
+			return NULL;
 		ptr = v;
 		ptr++;
 	}
@@ -1714,6 +1718,16 @@
 	if (ptr == SEQ_START_TOKEN) {
 		seq_puts(swap,"Filename\t\t\t\tType\t\tSize\tUsed\tPriority\n");
+		if (vx_flags(VXF_VIRT_MEM, 0)) {
+			struct sysinfo si;
+
+			vx_vsi_swapinfo(&si);
+			if (si.totalswap < (1 << 10))
+				return 0;
+			seq_printf(swap, "%s\t\t\t\t\t%s\t%lu\t%lu\t%d\n",
+				"hdv0", "partition", si.totalswap >> 10,
+				(si.totalswap - si.freeswap) >> 10, -1);
+		}
 		return 0;
 	}
@@ -2072,6 +2086,8 @@
 	val->freeswap = nr_swap_pages + nr_to_be_unused;
 	val->totalswap = total_swap_pages + nr_to_be_unused;
 	spin_unlock(&swap_lock);
+	if (vx_flags(VXF_VIRT_MEM, 0))
+		vx_vsi_swapinfo(val);
 }
 EXPORT_SYMBOL_GPL(si_swapinfo);
diff -Nur kernel-2.6.32.54/linux-2.6.32/mm/swapfile.c.orig kernel-2.6.32.54.vs/linux-2.6.32/mm/swapfile.c.orig
--- kernel-2.6.32.54/linux-2.6.32/mm/swapfile.c.orig	2012-01-16 15:01:39.984725179 +0100
+++ kernel-2.6.32.54.vs/linux-2.6.32/mm/swapfile.c.orig	2012-01-16 14:47:19.502254760 +0100
@@ -497,6 +497,7 @@
 	spin_unlock(&swap_lock);
 	return (swp_entry_t) {0};
 }
+EXPORT_SYMBOL_GPL(get_swap_page);
 
 /* The only caller of this function is now susupend routine */
 swp_entry_t get_swap_page_of_type(int type)
@@ -519,6 +520,7 @@
 	spin_unlock(&swap_lock);
 	return (swp_entry_t) {0};
 }
+EXPORT_SYMBOL_GPL(get_swap_page_of_type);
 
 static struct swap_info_struct * swap_info_get(swp_entry_t entry)
 {
@@ -631,6 +633,7 @@
 	}
 	return;
 }
+EXPORT_SYMBOL_GPL(swap_free);
 
 /*
  * How many references to page are currently swapped out?
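/*
 * Editorial sketch, not part of the patch: the pairing contract of the
 * slab_vs.h hooks added above. Both helpers bucket by
 * gfp_zone(cachep->gfpflags), the cache's creation-time flags (the
 * flags argument of vx_slab_alloc() does not affect the bucket), so
 * each atomic_add() on the instrumented allocation paths is balanced
 * by an equal atomic_sub() in the free path, and the per-guest
 * cacct.slab[] counters cannot drift.
 */
static void vx_slab_accounting_demo(struct kmem_cache *cachep)
{
	/* the node allocation path runs vx_slab_alloc(cachep, GFP_KERNEL):
	 * the current guest's slab[gfp_zone(cachep->gfpflags)] grows by
	 * cachep->buffer_size */
	void *obj = kmem_cache_alloc_node(cachep, GFP_KERNEL, 0);

	/* the free path runs vx_slab_free(cachep): same bucket, same
	 * amount subtracted, so the net effect for this guest is zero */
	if (obj)
		kmem_cache_free(cachep, obj);
}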
@@ -1307,6 +1310,7 @@ BUG_ON(se == start_se); /* It *must* be present */ } } +EXPORT_SYMBOL_GPL(map_swap_page); #ifdef CONFIG_HIBERNATION /* @@ -1650,6 +1654,7 @@ out: return err; } +EXPORT_SYMBOL_GPL(sys_swapoff); #ifdef CONFIG_PROC_FS /* iterator */ @@ -2050,6 +2055,7 @@ } return error; } +EXPORT_SYMBOL_GPL(sys_swapon); void si_swapinfo(struct sysinfo *val) { @@ -2067,6 +2073,7 @@ val->totalswap = total_swap_pages + nr_to_be_unused; spin_unlock(&swap_lock); } +EXPORT_SYMBOL_GPL(si_swapinfo); /* * Verify that a swap entry is valid and increment its swap map count. @@ -2167,6 +2174,7 @@ { return &swap_info[type]; } +EXPORT_SYMBOL_GPL(get_swap_info_struct); /* * swap_lock prevents swap_map being freed. Don't grab an extra diff -Nur kernel-2.6.32.54/linux-2.6.32/net/core/dev.c kernel-2.6.32.54.vs/linux-2.6.32/net/core/dev.c --- kernel-2.6.32.54/linux-2.6.32/net/core/dev.c 2012-01-16 15:01:40.480723423 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/net/core/dev.c 2012-01-16 14:51:22.085408113 +0100 @@ -129,6 +129,7 @@ #include #include #include +#include #include #include "net-sysfs.h" @@ -594,7 +595,8 @@ hlist_for_each(p, dev_name_hash(net, name)) { struct net_device *dev = hlist_entry(p, struct net_device, name_hlist); - if (!strncmp(dev->name, name, IFNAMSIZ)) + if (!strncmp(dev->name, name, IFNAMSIZ) && + nx_dev_visible(current_nx_info(), dev)) return dev; } return NULL; @@ -645,7 +647,8 @@ hlist_for_each(p, dev_index_hash(net, ifindex)) { struct net_device *dev = hlist_entry(p, struct net_device, index_hlist); - if (dev->ifindex == ifindex) + if ((dev->ifindex == ifindex) && + nx_dev_visible(current_nx_info(), dev)) return dev; } return NULL; @@ -698,10 +701,12 @@ ASSERT_RTNL(); - for_each_netdev(net, dev) + for_each_netdev(net, dev) { if (dev->type == type && - !memcmp(dev->dev_addr, ha, dev->addr_len)) + !memcmp(dev->dev_addr, ha, dev->addr_len) && + nx_dev_visible(current_nx_info(), dev)) return dev; + } return NULL; } @@ -712,9 +717,11 @@ struct net_device *dev; ASSERT_RTNL(); - for_each_netdev(net, dev) - if (dev->type == type) + for_each_netdev(net, dev) { + if ((dev->type == type) && + nx_dev_visible(current_nx_info(), dev)) return dev; + } return NULL; } @@ -833,6 +840,8 @@ continue; if (i < 0 || i >= max_netdevices) continue; + if (!nx_dev_visible(current_nx_info(), d)) + continue; /* avoid cases where sscanf is not exact inverse of printf */ snprintf(buf, IFNAMSIZ, name, i); @@ -3010,6 +3019,8 @@ total = 0; for_each_netdev(net, dev) { + if (!nx_dev_visible(current_nx_info(), dev)) + continue; for (i = 0; i < NPROTO; i++) { if (gifconf_list[i]) { int done; @@ -3078,6 +3089,9 @@ { const struct net_device_stats *stats = dev_get_stats(dev); + if (!nx_dev_visible(current_nx_info(), dev)) + return; + seq_printf(seq, "%6s:%8lu %7lu %4lu %4lu %4lu %5lu %10lu %9lu " "%8lu %7lu %4lu %4lu %4lu %5lu %7lu %10lu\n", dev->name, stats->rx_bytes, stats->rx_packets, @@ -5343,7 +5357,6 @@ if (dev->dev.parent) goto out; #endif - /* Ensure the device has been registrered */ err = -EINVAL; if (dev->reg_state != NETREG_REGISTERED) diff -Nur kernel-2.6.32.54/linux-2.6.32/net/core/dev.c.orig kernel-2.6.32.54.vs/linux-2.6.32/net/core/dev.c.orig --- kernel-2.6.32.54/linux-2.6.32/net/core/dev.c.orig 2012-01-16 15:01:40.008725094 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/net/core/dev.c.orig 2012-01-16 14:47:19.650254245 +0100 @@ -96,6 +96,9 @@ #include #include #include +#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE) +#include +#endif #include #include #include @@ -1712,7 +1715,11 @@ int rc; if 
(likely(!skb->next)) { - if (!list_empty(&ptype_all)) + if (!list_empty(&ptype_all) +#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE) + && !(skb->imq_flags & IMQ_F_ENQUEUE) +#endif + ) dev_queue_xmit_nit(skb, dev); if (netif_needs_gso(dev, skb)) { @@ -1805,8 +1812,7 @@ } EXPORT_SYMBOL(skb_tx_hash); -static struct netdev_queue *dev_pick_tx(struct net_device *dev, - struct sk_buff *skb) +struct netdev_queue *dev_pick_tx(struct net_device *dev, struct sk_buff *skb) { const struct net_device_ops *ops = dev->netdev_ops; u16 queue_index = 0; @@ -1819,6 +1825,7 @@ skb_set_queue_mapping(skb, queue_index); return netdev_get_tx_queue(dev, queue_index); } +EXPORT_SYMBOL(dev_pick_tx); static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q, struct net_device *dev, diff -Nur kernel-2.6.32.54/linux-2.6.32/net/core/rtnetlink.c kernel-2.6.32.54.vs/linux-2.6.32/net/core/rtnetlink.c --- kernel-2.6.32.54/linux-2.6.32/net/core/rtnetlink.c 2012-01-16 15:01:40.044724966 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/net/core/rtnetlink.c 2012-01-16 14:51:22.085408113 +0100 @@ -688,6 +688,8 @@ idx = 0; for_each_netdev(net, dev) { + if (!nx_dev_visible(skb->sk->sk_nx_info, dev)) + continue; if (idx < s_idx) goto cont; if (rtnl_fill_ifinfo(skb, dev, RTM_NEWLINK, @@ -1222,6 +1224,9 @@ struct sk_buff *skb; int err = -ENOBUFS; + if (!nx_dev_visible(current_nx_info(), dev)) + return; + skb = nlmsg_new(if_nlmsg_size(dev), GFP_KERNEL); if (skb == NULL) goto errout; diff -Nur kernel-2.6.32.54/linux-2.6.32/net/core/sock.c kernel-2.6.32.54.vs/linux-2.6.32/net/core/sock.c --- kernel-2.6.32.54/linux-2.6.32/net/core/sock.c 2012-01-16 15:01:40.044724966 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/net/core/sock.c 2012-01-16 14:51:22.089408099 +0100 @@ -125,6 +125,10 @@ #include #include +#include +#include +#include +#include #ifdef CONFIG_INET #include @@ -984,6 +988,8 @@ if (!try_module_get(prot->owner)) goto out_free_sec; } + sock_vx_init(sk); + sock_nx_init(sk); return sk; @@ -1063,6 +1069,11 @@ __func__, atomic_read(&sk->sk_omem_alloc)); put_net(sock_net(sk)); + vx_sock_dec(sk); + clr_vx_info(&sk->sk_vx_info); + sk->sk_xid = -1; + clr_nx_info(&sk->sk_nx_info); + sk->sk_nid = -1; sk_prot_free(sk->sk_prot_creator, sk); } @@ -1110,6 +1121,8 @@ /* SANITY */ get_net(sock_net(newsk)); + sock_vx_init(newsk); + sock_nx_init(newsk); sk_node_init(&newsk->sk_node); sock_lock_init(newsk); bh_lock_sock(newsk); @@ -1164,6 +1177,12 @@ smp_wmb(); atomic_set(&newsk->sk_refcnt, 2); + set_vx_info(&newsk->sk_vx_info, sk->sk_vx_info); + newsk->sk_xid = sk->sk_xid; + vx_sock_inc(newsk); + set_nx_info(&newsk->sk_nx_info, sk->sk_nx_info); + newsk->sk_nid = sk->sk_nid; + /* * Increment the counter in the same struct proto as the master * sock (sk_refcnt_debug_inc uses newsk->sk_prot->socks, that @@ -1886,6 +1905,12 @@ sk->sk_stamp = ktime_set(-1L, 0); + set_vx_info(&sk->sk_vx_info, current_vx_info()); + sk->sk_xid = vx_current_xid(); + vx_sock_inc(sk); + set_nx_info(&sk->sk_nx_info, current_nx_info()); + sk->sk_nid = nx_current_nid(); + /* * Before updating sk_refcnt, we must commit prior changes to memory * (Documentation/RCU/rculist_nulls.txt for details) diff -Nur kernel-2.6.32.54/linux-2.6.32/net/ipv4/af_inet.c kernel-2.6.32.54.vs/linux-2.6.32/net/ipv4/af_inet.c --- kernel-2.6.32.54/linux-2.6.32/net/ipv4/af_inet.c 2012-01-16 15:01:40.048724952 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/net/ipv4/af_inet.c 2012-01-16 14:51:22.093408085 +0100 @@ -115,6 +115,7 @@ #ifdef CONFIG_IP_MROUTE #include #endif +#include /* The inetsw table 
contains everything that inet_create needs to @@ -325,9 +326,12 @@ } err = -EPERM; + if ((protocol == IPPROTO_ICMP) && + nx_capable(answer->capability, NXC_RAW_ICMP)) + goto override; if (answer->capability > 0 && !capable(answer->capability)) goto out_rcu_unlock; - +override: err = -EAFNOSUPPORT; if (!inet_netns_ok(net, protocol)) goto out_rcu_unlock; @@ -447,6 +451,7 @@ struct sockaddr_in *addr = (struct sockaddr_in *)uaddr; struct sock *sk = sock->sk; struct inet_sock *inet = inet_sk(sk); + struct nx_v4_sock_addr nsa; unsigned short snum; int chk_addr_ret; int err; @@ -463,7 +468,11 @@ if (addr->sin_family != AF_INET) goto out; - chk_addr_ret = inet_addr_type(sock_net(sk), addr->sin_addr.s_addr); + err = v4_map_sock_addr(inet, addr, &nsa); + if (err) + goto out; + + chk_addr_ret = inet_addr_type(sock_net(sk), nsa.saddr); /* Not specified by any standard per-se, however it breaks too * many applications when removed. It is unfortunate since @@ -475,7 +484,7 @@ err = -EADDRNOTAVAIL; if (!sysctl_ip_nonlocal_bind && !(inet->freebind || inet->transparent) && - addr->sin_addr.s_addr != htonl(INADDR_ANY) && + nsa.saddr != htonl(INADDR_ANY) && chk_addr_ret != RTN_LOCAL && chk_addr_ret != RTN_MULTICAST && chk_addr_ret != RTN_BROADCAST) @@ -500,7 +509,7 @@ if (sk->sk_state != TCP_CLOSE || inet->num) goto out_release_sock; - inet->rcv_saddr = inet->saddr = addr->sin_addr.s_addr; + v4_set_sock_addr(inet, &nsa); if (chk_addr_ret == RTN_MULTICAST || chk_addr_ret == RTN_BROADCAST) inet->saddr = 0; /* Use device */ @@ -697,11 +706,13 @@ peer == 1)) return -ENOTCONN; sin->sin_port = inet->dport; - sin->sin_addr.s_addr = inet->daddr; + sin->sin_addr.s_addr = + nx_map_sock_lback(sk->sk_nx_info, inet->daddr); } else { __be32 addr = inet->rcv_saddr; if (!addr) addr = inet->saddr; + addr = nx_map_sock_lback(sk->sk_nx_info, addr); sin->sin_port = inet->sport; sin->sin_addr.s_addr = addr; } diff -Nur kernel-2.6.32.54/linux-2.6.32/net/ipv4/devinet.c kernel-2.6.32.54.vs/linux-2.6.32/net/ipv4/devinet.c --- kernel-2.6.32.54/linux-2.6.32/net/ipv4/devinet.c 2012-01-16 15:01:40.048724952 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/net/ipv4/devinet.c 2012-01-16 14:51:22.093408085 +0100 @@ -413,6 +413,7 @@ return in_dev; } + /* Called only from RTNL semaphored context. No locks. */ struct in_ifaddr *inet_ifa_byprefix(struct in_device *in_dev, __be32 prefix, @@ -653,6 +654,8 @@ *colon = ':'; if ((in_dev = __in_dev_get_rtnl(dev)) != NULL) { + struct nx_info *nxi = current_nx_info(); + if (tryaddrmatch) { /* Matthias Andree */ /* compare label and address (4.4BSD style) */ @@ -661,6 +664,8 @@ This is checked above. 
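	   Editorial note (not part of the patch): each address walk
	   below is additionally filtered through nx_v4_ifa_visible().
	   Its body is not in these hunks; judging from helpers used
	   elsewhere in the patch, a plausible reduction is "!nxi ||
	   v4_addr_in_nx_info(nxi, ifa->ifa_local, mask)", where the
	   exact mask name is an assumption.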
*/ for (ifap = &in_dev->ifa_list; (ifa = *ifap) != NULL; ifap = &ifa->ifa_next) { + if (!nx_v4_ifa_visible(nxi, ifa)) + continue; if (!strcmp(ifr.ifr_name, ifa->ifa_label) && sin_orig.sin_addr.s_addr == ifa->ifa_address) { @@ -673,9 +678,12 @@ comparing just the label */ if (!ifa) { for (ifap = &in_dev->ifa_list; (ifa = *ifap) != NULL; - ifap = &ifa->ifa_next) + ifap = &ifa->ifa_next) { + if (!nx_v4_ifa_visible(nxi, ifa)) + continue; if (!strcmp(ifr.ifr_name, ifa->ifa_label)) break; + } } } @@ -826,6 +834,8 @@ goto out; for (; ifa; ifa = ifa->ifa_next) { + if (!nx_v4_ifa_visible(current_nx_info(), ifa)) + continue; if (!buf) { done += sizeof(ifr); continue; @@ -1185,6 +1195,7 @@ struct net_device *dev; struct in_device *in_dev; struct in_ifaddr *ifa; + struct sock *sk = skb->sk; int s_ip_idx, s_idx = cb->args[0]; s_ip_idx = ip_idx = cb->args[1]; @@ -1199,6 +1210,8 @@ for (ifa = in_dev->ifa_list, ip_idx = 0; ifa; ifa = ifa->ifa_next, ip_idx++) { + if (sk && !nx_v4_ifa_visible(sk->sk_nx_info, ifa)) + continue; if (ip_idx < s_ip_idx) continue; if (inet_fill_ifaddr(skb, ifa, NETLINK_CB(cb->skb).pid, diff -Nur kernel-2.6.32.54/linux-2.6.32/net/ipv4/fib_hash.c kernel-2.6.32.54.vs/linux-2.6.32/net/ipv4/fib_hash.c --- kernel-2.6.32.54/linux-2.6.32/net/ipv4/fib_hash.c 2009-12-03 04:51:21.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/net/ipv4/fib_hash.c 2012-01-16 14:51:22.093408085 +0100 @@ -1021,7 +1021,7 @@ prefix = f->fn_key; mask = FZ_MASK(iter->zone); flags = fib_flag_trans(fa->fa_type, mask, fi); - if (fi) + if (fi && nx_dev_visible(current_nx_info(), fi->fib_dev)) seq_printf(seq, "%s\t%08X\t%08X\t%04X\t%d\t%u\t%d\t%08X\t%d\t%u\t%u%n", fi->fib_dev ? fi->fib_dev->name : "*", prefix, diff -Nur kernel-2.6.32.54/linux-2.6.32/net/ipv4/inet_connection_sock.c kernel-2.6.32.54.vs/linux-2.6.32/net/ipv4/inet_connection_sock.c --- kernel-2.6.32.54/linux-2.6.32/net/ipv4/inet_connection_sock.c 2009-12-03 04:51:21.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/net/ipv4/inet_connection_sock.c 2012-01-16 14:51:22.093408085 +0100 @@ -49,10 +49,40 @@ } EXPORT_SYMBOL(inet_get_local_port_range); +int ipv4_rcv_saddr_equal(const struct sock *sk1, const struct sock *sk2) +{ + __be32 sk1_rcv_saddr = inet_rcv_saddr(sk1), + sk2_rcv_saddr = inet_rcv_saddr(sk2); + + if (inet_v6_ipv6only(sk2)) + return 0; + + if (sk1_rcv_saddr && + sk2_rcv_saddr && + sk1_rcv_saddr == sk2_rcv_saddr) + return 1; + + if (sk1_rcv_saddr && + !sk2_rcv_saddr && + v4_addr_in_nx_info(sk2->sk_nx_info, sk1_rcv_saddr, NXA_MASK_BIND)) + return 1; + + if (sk2_rcv_saddr && + !sk1_rcv_saddr && + v4_addr_in_nx_info(sk1->sk_nx_info, sk2_rcv_saddr, NXA_MASK_BIND)) + return 1; + + if (!sk1_rcv_saddr && + !sk2_rcv_saddr && + nx_v4_addr_conflict(sk1->sk_nx_info, sk2->sk_nx_info)) + return 1; + + return 0; +} + int inet_csk_bind_conflict(const struct sock *sk, const struct inet_bind_bucket *tb) { - const __be32 sk_rcv_saddr = inet_rcv_saddr(sk); struct sock *sk2; struct hlist_node *node; int reuse = sk->sk_reuse; @@ -72,9 +102,7 @@ sk->sk_bound_dev_if == sk2->sk_bound_dev_if)) { if (!reuse || !sk2->sk_reuse || sk2->sk_state == TCP_LISTEN) { - const __be32 sk2_rcv_saddr = inet_rcv_saddr(sk2); - if (!sk2_rcv_saddr || !sk_rcv_saddr || - sk2_rcv_saddr == sk_rcv_saddr) + if (ipv4_rcv_saddr_equal(sk, sk2)) break; } } diff -Nur kernel-2.6.32.54/linux-2.6.32/net/ipv4/inet_diag.c kernel-2.6.32.54.vs/linux-2.6.32/net/ipv4/inet_diag.c --- kernel-2.6.32.54/linux-2.6.32/net/ipv4/inet_diag.c 2012-01-16 15:01:40.048724952 +0100 +++ 
kernel-2.6.32.54.vs/linux-2.6.32/net/ipv4/inet_diag.c 2012-01-16 14:51:22.097408071 +0100 @@ -32,6 +32,8 @@ #include #include +#include +#include static const struct inet_diag_handler **inet_diag_table; @@ -118,8 +120,8 @@ r->id.idiag_sport = inet->sport; r->id.idiag_dport = inet->dport; - r->id.idiag_src[0] = inet->rcv_saddr; - r->id.idiag_dst[0] = inet->daddr; + r->id.idiag_src[0] = nx_map_sock_lback(sk->sk_nx_info, inet->rcv_saddr); + r->id.idiag_dst[0] = nx_map_sock_lback(sk->sk_nx_info, inet->daddr); #if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE) if (r->idiag_family == AF_INET6) { @@ -204,8 +206,8 @@ r->id.idiag_cookie[1] = (u32)(((unsigned long)tw >> 31) >> 1); r->id.idiag_sport = tw->tw_sport; r->id.idiag_dport = tw->tw_dport; - r->id.idiag_src[0] = tw->tw_rcv_saddr; - r->id.idiag_dst[0] = tw->tw_daddr; + r->id.idiag_src[0] = nx_map_sock_lback(tw->tw_nx_info, tw->tw_rcv_saddr); + r->id.idiag_dst[0] = nx_map_sock_lback(tw->tw_nx_info, tw->tw_daddr); r->idiag_state = tw->tw_substate; r->idiag_timer = 3; r->idiag_expires = DIV_ROUND_UP(tmo * 1000, HZ); @@ -262,6 +264,7 @@ err = -EINVAL; if (req->idiag_family == AF_INET) { + /* TODO: lback */ sk = inet_lookup(&init_net, hashinfo, req->id.idiag_dst[0], req->id.idiag_dport, req->id.idiag_src[0], req->id.idiag_sport, req->id.idiag_if); @@ -504,6 +507,7 @@ } else #endif { + /* TODO: lback */ entry.saddr = &inet->rcv_saddr; entry.daddr = &inet->daddr; } @@ -542,6 +546,7 @@ } else #endif { + /* TODO: lback */ entry.saddr = &tw->tw_rcv_saddr; entry.daddr = &tw->tw_daddr; } @@ -588,8 +593,8 @@ r->id.idiag_sport = inet->sport; r->id.idiag_dport = ireq->rmt_port; - r->id.idiag_src[0] = ireq->loc_addr; - r->id.idiag_dst[0] = ireq->rmt_addr; + r->id.idiag_src[0] = nx_map_sock_lback(sk->sk_nx_info, ireq->loc_addr); + r->id.idiag_dst[0] = nx_map_sock_lback(sk->sk_nx_info, ireq->rmt_addr); r->idiag_expires = jiffies_to_msecs(tmo); r->idiag_rqueue = 0; r->idiag_wqueue = 0; @@ -660,6 +665,7 @@ continue; if (bc) { + /* TODO: lback */ entry.saddr = #if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE) (entry.family == AF_INET6) ? @@ -730,6 +736,8 @@ sk_nulls_for_each(sk, node, &ilb->head) { struct inet_sock *inet = inet_sk(sk); + if (!nx_check(sk->sk_nid, VS_WATCH_P | VS_IDENT)) + continue; if (num < s_num) { num++; continue; @@ -796,6 +804,8 @@ sk_nulls_for_each(sk, node, &head->chain) { struct inet_sock *inet = inet_sk(sk); + if (!nx_check(sk->sk_nid, VS_WATCH_P | VS_IDENT)) + continue; if (num < s_num) goto next_normal; if (!(r->idiag_states & (1 << sk->sk_state))) @@ -820,6 +830,8 @@ inet_twsk_for_each(tw, node, &head->twchain) { + if (!nx_check(tw->tw_nid, VS_WATCH_P | VS_IDENT)) + continue; if (num < s_num) goto next_dying; if (r->id.idiag_sport != tw->tw_sport && diff -Nur kernel-2.6.32.54/linux-2.6.32/net/ipv4/inet_hashtables.c kernel-2.6.32.54.vs/linux-2.6.32/net/ipv4/inet_hashtables.c --- kernel-2.6.32.54/linux-2.6.32/net/ipv4/inet_hashtables.c 2012-01-16 15:01:40.056724924 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/net/ipv4/inet_hashtables.c 2012-01-16 14:51:22.097408071 +0100 @@ -22,6 +22,7 @@ #include #include #include +#include #include /* @@ -135,6 +136,11 @@ if (rcv_saddr != daddr) return -1; score += 2; + } else { + /* block non nx_info ips */ + if (!v4_addr_in_nx_info(sk->sk_nx_info, + daddr, NXA_MASK_BIND)) + return -1; } if (sk->sk_bound_dev_if) { if (sk->sk_bound_dev_if != dif) @@ -152,7 +158,6 @@ * wildcarded during the search since they can never be otherwise. 
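 *
 * Editorial example (not part of the patch) for the
 * ipv4_rcv_saddr_equal() logic added above, with guests
 * G1 = {10.0.0.1} and G2 = {10.0.0.2} (addresses illustrative):
 * two specifically-bound sockets conflict only on an exact address
 * match; a wildcard bind in G2 conflicts with a socket bound to
 * 10.0.0.1 only if that address is in G2's set (here it is not);
 * two wildcard binds conflict only when nx_v4_addr_conflict()
 * finds the two guests' address sets overlapping.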
*/ - struct sock *__inet_lookup_listener(struct net *net, struct inet_hashinfo *hashinfo, const __be32 daddr, const unsigned short hnum, @@ -175,6 +180,7 @@ hiscore = score; } } + /* * if the nulls value we got at the end of this lookup is * not the expected one, we must restart lookup. diff -Nur kernel-2.6.32.54/linux-2.6.32/net/ipv4/netfilter/nf_nat_helper.c kernel-2.6.32.54.vs/linux-2.6.32/net/ipv4/netfilter/nf_nat_helper.c --- kernel-2.6.32.54/linux-2.6.32/net/ipv4/netfilter/nf_nat_helper.c 2009-12-03 04:51:21.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/net/ipv4/netfilter/nf_nat_helper.c 2012-01-16 14:51:22.097408071 +0100 @@ -19,6 +19,7 @@ #include #include +#include #include #include #include diff -Nur kernel-2.6.32.54/linux-2.6.32/net/ipv4/netfilter.c kernel-2.6.32.54.vs/linux-2.6.32/net/ipv4/netfilter.c --- kernel-2.6.32.54/linux-2.6.32/net/ipv4/netfilter.c 2009-12-03 04:51:21.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/net/ipv4/netfilter.c 2012-01-16 14:51:22.097408071 +0100 @@ -4,7 +4,7 @@ #include #include #include -#include +// #include #include #include #include diff -Nur kernel-2.6.32.54/linux-2.6.32/net/ipv4/raw.c kernel-2.6.32.54.vs/linux-2.6.32/net/ipv4/raw.c --- kernel-2.6.32.54/linux-2.6.32/net/ipv4/raw.c 2009-12-03 04:51:21.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/net/ipv4/raw.c 2012-01-16 14:51:22.097408071 +0100 @@ -117,7 +117,7 @@ if (net_eq(sock_net(sk), net) && inet->num == num && !(inet->daddr && inet->daddr != raddr) && - !(inet->rcv_saddr && inet->rcv_saddr != laddr) && + v4_sock_addr_match(sk->sk_nx_info, inet, laddr) && !(sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif)) goto found; /* gotcha */ } @@ -383,6 +383,12 @@ icmp_out_count(net, ((struct icmphdr *) skb_transport_header(skb))->type); + err = -EPERM; + if (!nx_check(0, VS_ADMIN) && !capable(CAP_NET_RAW) && + sk->sk_nx_info && + !v4_addr_in_nx_info(sk->sk_nx_info, iph->saddr, NXA_MASK_BIND)) + goto error_free; + err = NF_HOOK(PF_INET, NF_INET_LOCAL_OUT, skb, NULL, rt->u.dst.dev, dst_output); if (err > 0) @@ -563,6 +569,13 @@ } security_sk_classify_flow(sk, &fl); + if (sk->sk_nx_info) { + err = ip_v4_find_src(sock_net(sk), + sk->sk_nx_info, &rt, &fl); + + if (err) + goto done; + } err = ip_route_output_flow(sock_net(sk), &rt, &fl, sk, 1); } if (err) @@ -635,17 +648,19 @@ { struct inet_sock *inet = inet_sk(sk); struct sockaddr_in *addr = (struct sockaddr_in *) uaddr; + struct nx_v4_sock_addr nsa = { 0 }; int ret = -EINVAL; int chk_addr_ret; if (sk->sk_state != TCP_CLOSE || addr_len < sizeof(struct sockaddr_in)) goto out; - chk_addr_ret = inet_addr_type(sock_net(sk), addr->sin_addr.s_addr); + v4_map_sock_addr(inet, addr, &nsa); + chk_addr_ret = inet_addr_type(sock_net(sk), nsa.saddr); ret = -EADDRNOTAVAIL; - if (addr->sin_addr.s_addr && chk_addr_ret != RTN_LOCAL && + if (nsa.saddr && chk_addr_ret != RTN_LOCAL && chk_addr_ret != RTN_MULTICAST && chk_addr_ret != RTN_BROADCAST) goto out; - inet->rcv_saddr = inet->saddr = addr->sin_addr.s_addr; + v4_set_sock_addr(inet, &nsa); if (chk_addr_ret == RTN_MULTICAST || chk_addr_ret == RTN_BROADCAST) inet->saddr = 0; /* Use device */ sk_dst_reset(sk); @@ -697,7 +712,8 @@ /* Copy the address. 
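	   Editorial note (not part of the patch): nx_map_sock_lback(),
	   used below and throughout these hunks, translates the host
	   loopback into the guest's private loopback address before it
	   reaches userspace (conceptually, "addr ==
	   htonl(INADDR_LOOPBACK) ? guest lback : addr"), while
	   v4_map_sock_addr() above performs the reverse mapping at
	   bind time.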
*/ if (sin) { sin->sin_family = AF_INET; - sin->sin_addr.s_addr = ip_hdr(skb)->saddr; + sin->sin_addr.s_addr = + nx_map_sock_lback(sk->sk_nx_info, ip_hdr(skb)->saddr); sin->sin_port = 0; memset(&sin->sin_zero, 0, sizeof(sin->sin_zero)); } @@ -875,7 +891,8 @@ struct hlist_node *node; sk_for_each(sk, node, &state->h->ht[state->bucket]) - if (sock_net(sk) == seq_file_net(seq)) + if ((sock_net(sk) == seq_file_net(seq)) && + nx_check(sk->sk_nid, VS_WATCH_P | VS_IDENT)) goto found; } sk = NULL; @@ -891,7 +908,8 @@ sk = sk_next(sk); try_again: ; - } while (sk && sock_net(sk) != seq_file_net(seq)); + } while (sk && ((sock_net(sk) != seq_file_net(seq)) || + !nx_check(sk->sk_nid, VS_WATCH_P | VS_IDENT))); if (!sk && ++state->bucket < RAW_HTABLE_SIZE) { sk = sk_head(&state->h->ht[state->bucket]); @@ -950,7 +968,10 @@ seq_printf(seq, "%4d: %08X:%04X %08X:%04X" " %02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p %d\n", - i, src, srcp, dest, destp, sp->sk_state, + i, + nx_map_sock_lback(current_nx_info(), src), srcp, + nx_map_sock_lback(current_nx_info(), dest), destp, + sp->sk_state, sk_wmem_alloc_get(sp), sk_rmem_alloc_get(sp), 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp), diff -Nur kernel-2.6.32.54/linux-2.6.32/net/ipv4/tcp.c kernel-2.6.32.54.vs/linux-2.6.32/net/ipv4/tcp.c --- kernel-2.6.32.54/linux-2.6.32/net/ipv4/tcp.c 2012-01-16 15:01:40.076724853 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/net/ipv4/tcp.c 2012-01-16 14:51:22.101408057 +0100 @@ -264,6 +264,7 @@ #include #include #include +#include #include #include diff -Nur kernel-2.6.32.54/linux-2.6.32/net/ipv4/tcp_ipv4.c kernel-2.6.32.54.vs/linux-2.6.32/net/ipv4/tcp_ipv4.c --- kernel-2.6.32.54/linux-2.6.32/net/ipv4/tcp_ipv4.c 2012-01-16 15:01:40.076724853 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/net/ipv4/tcp_ipv4.c 2012-01-16 14:51:22.101408057 +0100 @@ -1926,6 +1926,12 @@ req = req->dl_next; while (1) { while (req) { + vxdprintk(VXD_CBIT(net, 6), + "sk,req: %p [#%d] (from %d)", req->sk, + (req->sk)?req->sk->sk_nid:0, nx_current_nid()); + if (req->sk && + !nx_check(req->sk->sk_nid, VS_WATCH_P | VS_IDENT)) + continue; if (req->rsk_ops->family == st->family) { cur = req; goto out; @@ -1950,6 +1956,10 @@ } get_sk: sk_nulls_for_each_from(sk, node) { + vxdprintk(VXD_CBIT(net, 6), "sk: %p [#%d] (from %d)", + sk, sk->sk_nid, nx_current_nid()); + if (!nx_check(sk->sk_nid, VS_WATCH_P | VS_IDENT)) + continue; if (sk->sk_family == st->family && net_eq(sock_net(sk), net)) { cur = sk; goto out; @@ -2013,6 +2023,11 @@ spin_lock_bh(lock); sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) { + vxdprintk(VXD_CBIT(net, 6), + "sk,egf: %p [#%d] (from %d)", + sk, sk->sk_nid, nx_current_nid()); + if (!nx_check(sk->sk_nid, VS_WATCH_P | VS_IDENT)) + continue; if (sk->sk_family != st->family || !net_eq(sock_net(sk), net)) { continue; @@ -2023,6 +2038,11 @@ st->state = TCP_SEQ_STATE_TIME_WAIT; inet_twsk_for_each(tw, node, &tcp_hashinfo.ehash[st->bucket].twchain) { + vxdprintk(VXD_CBIT(net, 6), + "tw: %p [#%d] (from %d)", + tw, tw->tw_nid, nx_current_nid()); + if (!nx_check(tw->tw_nid, VS_WATCH_P | VS_IDENT)) + continue; if (tw->tw_family != st->family || !net_eq(twsk_net(tw), net)) { continue; @@ -2051,7 +2071,9 @@ tw = cur; tw = tw_next(tw); get_tw: - while (tw && (tw->tw_family != st->family || !net_eq(twsk_net(tw), net))) { + while (tw && (tw->tw_family != st->family || + !net_eq(twsk_net(tw), net) || + !nx_check(tw->tw_nid, VS_WATCH_P | VS_IDENT))) { tw = tw_next(tw); } if (tw) { @@ -2074,6 +2096,11 @@ sk = sk_nulls_next(sk); sk_nulls_for_each_from(sk, 
node) { + vxdprintk(VXD_CBIT(net, 6), + "sk,egn: %p [#%d] (from %d)", + sk, sk->sk_nid, nx_current_nid()); + if (!nx_check(sk->sk_nid, VS_WATCH_P | VS_IDENT)) + continue; if (sk->sk_family == st->family && net_eq(sock_net(sk), net)) goto found; } @@ -2225,9 +2252,9 @@ seq_printf(f, "%4d: %08X:%04X %08X:%04X" " %02X %08X:%08X %02X:%08lX %08X %5d %8d %u %d %p%n", i, - ireq->loc_addr, + nx_map_sock_lback(current_nx_info(), ireq->loc_addr), ntohs(inet_sk(sk)->sport), - ireq->rmt_addr, + nx_map_sock_lback(current_nx_info(), ireq->rmt_addr), ntohs(ireq->rmt_port), TCP_SYN_RECV, 0, 0, /* could print option size, but that is af dependent. */ @@ -2270,7 +2297,10 @@ seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX " "%08X %5d %8d %lu %d %p %lu %lu %u %u %d%n", - i, src, srcp, dest, destp, sk->sk_state, + i, + nx_map_sock_lback(current_nx_info(), src), srcp, + nx_map_sock_lback(current_nx_info(), dest), destp, + sk->sk_state, tp->write_seq - tp->snd_una, sk->sk_state == TCP_LISTEN ? sk->sk_ack_backlog : (tp->rcv_nxt - tp->copied_seq), @@ -2306,7 +2336,10 @@ seq_printf(f, "%4d: %08X:%04X %08X:%04X" " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p%n", - i, src, srcp, dest, destp, tw->tw_substate, 0, 0, + i, + nx_map_sock_lback(current_nx_info(), src), srcp, + nx_map_sock_lback(current_nx_info(), dest), destp, + tw->tw_substate, 0, 0, 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0, atomic_read(&tw->tw_refcnt), tw, len); } diff -Nur kernel-2.6.32.54/linux-2.6.32/net/ipv4/tcp_minisocks.c kernel-2.6.32.54.vs/linux-2.6.32/net/ipv4/tcp_minisocks.c --- kernel-2.6.32.54/linux-2.6.32/net/ipv4/tcp_minisocks.c 2009-12-03 04:51:21.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/net/ipv4/tcp_minisocks.c 2012-01-16 14:51:22.105408043 +0100 @@ -26,6 +26,10 @@ #include #include +#include +#include +#include + #ifdef CONFIG_SYSCTL #define SYNC_INIT 0 /* let the user enable it */ #else @@ -294,6 +298,11 @@ tcptw->tw_ts_recent = tp->rx_opt.ts_recent; tcptw->tw_ts_recent_stamp = tp->rx_opt.ts_recent_stamp; + tw->tw_xid = sk->sk_xid; + tw->tw_vx_info = NULL; + tw->tw_nid = sk->sk_nid; + tw->tw_nx_info = NULL; + #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) if (tw->tw_family == PF_INET6) { struct ipv6_pinfo *np = inet6_sk(sk); diff -Nur kernel-2.6.32.54/linux-2.6.32/net/ipv4/udp.c kernel-2.6.32.54.vs/linux-2.6.32/net/ipv4/udp.c --- kernel-2.6.32.54/linux-2.6.32/net/ipv4/udp.c 2012-01-16 15:01:40.076724853 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/net/ipv4/udp.c 2012-01-16 14:51:22.105408043 +0100 @@ -224,14 +224,7 @@ } EXPORT_SYMBOL(udp_lib_get_port); -static int ipv4_rcv_saddr_equal(const struct sock *sk1, const struct sock *sk2) -{ - struct inet_sock *inet1 = inet_sk(sk1), *inet2 = inet_sk(sk2); - - return (!ipv6_only_sock(sk2) && - (!inet1->rcv_saddr || !inet2->rcv_saddr || - inet1->rcv_saddr == inet2->rcv_saddr)); -} +extern int ipv4_rcv_saddr_equal(const struct sock *, const struct sock *); int udp_v4_get_port(struct sock *sk, unsigned short snum) { @@ -253,6 +246,11 @@ if (inet->rcv_saddr != daddr) return -1; score += 2; + } else { + /* block non nx_info ips */ + if (!v4_addr_in_nx_info(sk->sk_nx_info, + daddr, NXA_MASK_BIND)) + return -1; } if (inet->daddr) { if (inet->daddr != saddr) @@ -273,6 +271,7 @@ return score; } + /* UDP is nearly always wildcards out the wazoo, it makes no sense to try * harder than this. -DaveM */ @@ -294,6 +293,11 @@ sk_nulls_for_each_rcu(sk, node, &hslot->head) { score = compute_score(sk, net, saddr, hnum, sport, daddr, dport, dif); + /* FIXME: disabled? 
+ if (score == 9) { + result = sk; + break; + } else */ if (score > badness) { result = sk; badness = score; @@ -307,6 +311,7 @@ if (get_nulls_value(node) != hash) goto begin; + if (result) { if (unlikely(!atomic_inc_not_zero(&result->sk_refcnt))) result = NULL; @@ -316,6 +321,7 @@ goto begin; } } + rcu_read_unlock(); return result; } @@ -358,7 +364,7 @@ s->sk_hash != hnum || (inet->daddr && inet->daddr != rmt_addr) || (inet->dport != rmt_port && inet->dport) || - (inet->rcv_saddr && inet->rcv_saddr != loc_addr) || + !v4_sock_addr_match(sk->sk_nx_info, inet, loc_addr) || ipv6_only_sock(s) || (s->sk_bound_dev_if && s->sk_bound_dev_if != dif)) continue; @@ -707,8 +713,13 @@ { .sport = inet->sport, .dport = dport } } }; struct net *net = sock_net(sk); + struct nx_info *nxi = sk->sk_nx_info; security_sk_classify_flow(sk, &fl); + err = ip_v4_find_src(net, nxi, &rt, &fl); + if (err) + goto out; + err = ip_route_output_flow(net, &rt, &fl, sk, 1); if (err) { if (err == -ENETUNREACH) @@ -988,7 +999,8 @@ if (sin) { sin->sin_family = AF_INET; sin->sin_port = udp_hdr(skb)->source; - sin->sin_addr.s_addr = ip_hdr(skb)->saddr; + sin->sin_addr.s_addr = nx_map_sock_lback( + skb->sk->sk_nx_info, ip_hdr(skb)->saddr); memset(sin->sin_zero, 0, sizeof(sin->sin_zero)); } if (inet->cmsg_flags) @@ -1630,6 +1642,8 @@ sk_nulls_for_each(sk, node, &hslot->head) { if (!net_eq(sock_net(sk), net)) continue; + if (!nx_check(sk->sk_nid, VS_WATCH_P | VS_IDENT)) + continue; if (sk->sk_family == state->family) goto found; } @@ -1647,7 +1661,9 @@ do { sk = sk_nulls_next(sk); - } while (sk && (!net_eq(sock_net(sk), net) || sk->sk_family != state->family)); + } while (sk && (!net_eq(sock_net(sk), net) || + sk->sk_family != state->family || + !nx_check(sk->sk_nid, VS_WATCH_P | VS_IDENT))); if (!sk) { if (state->bucket < UDP_HTABLE_SIZE) @@ -1754,7 +1770,10 @@ seq_printf(f, "%4d: %08X:%04X %08X:%04X" " %02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p %d%n", - bucket, src, srcp, dest, destp, sp->sk_state, + bucket, + nx_map_sock_lback(current_nx_info(), src), srcp, + nx_map_sock_lback(current_nx_info(), dest), destp, + sp->sk_state, sk_wmem_alloc_get(sp), sk_rmem_alloc_get(sp), 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp), diff -Nur kernel-2.6.32.54/linux-2.6.32/net/ipv6/addrconf.c kernel-2.6.32.54.vs/linux-2.6.32/net/ipv6/addrconf.c --- kernel-2.6.32.54/linux-2.6.32/net/ipv6/addrconf.c 2012-01-16 15:01:40.076724853 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/net/ipv6/addrconf.c 2012-01-16 14:51:22.105408043 +0100 @@ -86,6 +86,8 @@ #include #include +#include +#include /* Set to 3 to get tracing... 
*/ #define ACONF_DEBUG 2 @@ -1119,7 +1121,7 @@ int ipv6_dev_get_saddr(struct net *net, struct net_device *dst_dev, const struct in6_addr *daddr, unsigned int prefs, - struct in6_addr *saddr) + struct in6_addr *saddr, struct nx_info *nxi) { struct ipv6_saddr_score scores[2], *score = &scores[0], *hiscore = &scores[1]; @@ -1192,6 +1194,8 @@ dev->name); continue; } + if (!v6_addr_in_nx_info(nxi, &score->ifa->addr, -1)) + continue; score->rule = -1; bitmap_zero(score->scorebits, IPV6_SADDR_RULE_MAX); @@ -3000,7 +3004,10 @@ static int if6_seq_show(struct seq_file *seq, void *v) { struct inet6_ifaddr *ifp = (struct inet6_ifaddr *)v; - seq_printf(seq, "%pi6 %02x %02x %02x %02x %8s\n", + + if (nx_check(0, VS_ADMIN|VS_WATCH) || + v6_addr_in_nx_info(current_nx_info(), &ifp->addr, -1)) + seq_printf(seq, "%pi6 %02x %02x %02x %02x %8s\n", &ifp->addr, ifp->idev->dev->ifindex, ifp->prefix_len, @@ -3497,6 +3504,12 @@ struct ifmcaddr6 *ifmca; struct ifacaddr6 *ifaca; struct net *net = sock_net(skb->sk); + struct nx_info *nxi = skb->sk ? skb->sk->sk_nx_info : NULL; + + /* disable ipv6 on non v6 guests */ + if (nxi && !nx_info_has_v6(nxi)) + return skb->len; + s_idx = cb->args[0]; s_ip_idx = ip_idx = cb->args[1]; @@ -3518,6 +3531,8 @@ ifa = ifa->if_next, ip_idx++) { if (ip_idx < s_ip_idx) continue; + if (!v6_addr_in_nx_info(nxi, &ifa->addr, -1)) + continue; err = inet6_fill_ifaddr(skb, ifa, NETLINK_CB(cb->skb).pid, cb->nlh->nlmsg_seq, @@ -3531,6 +3546,8 @@ ifmca = ifmca->next, ip_idx++) { if (ip_idx < s_ip_idx) continue; + if (!v6_addr_in_nx_info(nxi, &ifmca->mca_addr, -1)) + continue; err = inet6_fill_ifmcaddr(skb, ifmca, NETLINK_CB(cb->skb).pid, cb->nlh->nlmsg_seq, @@ -3544,6 +3561,8 @@ ifaca = ifaca->aca_next, ip_idx++) { if (ip_idx < s_ip_idx) continue; + if (!v6_addr_in_nx_info(nxi, &ifaca->aca_addr, -1)) + continue; err = inet6_fill_ifacaddr(skb, ifaca, NETLINK_CB(cb->skb).pid, cb->nlh->nlmsg_seq, @@ -3830,12 +3849,19 @@ int s_idx = cb->args[0]; struct net_device *dev; struct inet6_dev *idev; + struct nx_info *nxi = skb->sk ? skb->sk->sk_nx_info : NULL; + + /* FIXME: maybe disable ipv6 on non v6 guests? 
+ if (skb->sk && skb->sk->sk_vx_info) + return skb->len; */ read_lock(&dev_base_lock); idx = 0; for_each_netdev(net, dev) { if (idx < s_idx) goto cont; + if (!v6_dev_in_nx_info(dev, nxi)) + goto cont; if ((idev = in6_dev_get(dev)) == NULL) goto cont; err = inet6_fill_ifinfo(skb, idev, NETLINK_CB(cb->skb).pid, diff -Nur kernel-2.6.32.54/linux-2.6.32/net/ipv6/af_inet6.c kernel-2.6.32.54.vs/linux-2.6.32/net/ipv6/af_inet6.c --- kernel-2.6.32.54/linux-2.6.32/net/ipv6/af_inet6.c 2009-12-03 04:51:21.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/net/ipv6/af_inet6.c 2012-01-16 14:51:22.105408043 +0100 @@ -41,6 +41,8 @@ #include #include #include +#include +#include #include #include @@ -158,9 +160,12 @@ } err = -EPERM; + if ((protocol == IPPROTO_ICMPV6) && + nx_capable(answer->capability, NXC_RAW_ICMP)) + goto override; if (answer->capability > 0 && !capable(answer->capability)) goto out_rcu_unlock; - +override: sock->ops = answer->ops; answer_prot = answer->prot; answer_no_check = answer->no_check; @@ -259,6 +264,7 @@ struct inet_sock *inet = inet_sk(sk); struct ipv6_pinfo *np = inet6_sk(sk); struct net *net = sock_net(sk); + struct nx_v6_sock_addr nsa; __be32 v4addr = 0; unsigned short snum; int addr_type = 0; @@ -270,6 +276,11 @@ if (addr_len < SIN6_LEN_RFC2133) return -EINVAL; + + err = v6_map_sock_addr(inet, addr, &nsa); + if (err) + return err; + addr_type = ipv6_addr_type(&addr->sin6_addr); if ((addr_type & IPV6_ADDR_MULTICAST) && sock->type == SOCK_STREAM) return -EINVAL; @@ -301,6 +312,7 @@ /* Reproduce AF_INET checks to make the bindings consitant */ v4addr = addr->sin6_addr.s6_addr32[3]; chk_addr_ret = inet_addr_type(net, v4addr); + if (!sysctl_ip_nonlocal_bind && !(inet->freebind || inet->transparent) && v4addr != htonl(INADDR_ANY) && @@ -310,6 +322,10 @@ err = -EADDRNOTAVAIL; goto out; } + if (!v4_addr_in_nx_info(sk->sk_nx_info, v4addr, NXA_MASK_BIND)) { + err = -EADDRNOTAVAIL; + goto out; + } } else { if (addr_type != IPV6_ADDR_ANY) { struct net_device *dev = NULL; @@ -335,6 +351,11 @@ } } + if (!v6_addr_in_nx_info(sk->sk_nx_info, &addr->sin6_addr, -1)) { + err = -EADDRNOTAVAIL; + goto out; + } + /* ipv4 addr of the socket is invalid. Only the * unspecified and mapped address have a v4 equivalent. */ @@ -353,6 +374,8 @@ } } + v6_set_sock_addr(inet, &nsa); + inet->rcv_saddr = v4addr; inet->saddr = v4addr; @@ -448,9 +471,11 @@ return -ENOTCONN; sin->sin6_port = inet->dport; ipv6_addr_copy(&sin->sin6_addr, &np->daddr); + /* FIXME: remap lback? */ if (np->sndflow) sin->sin6_flowinfo = np->flow_label; } else { + /* FIXME: remap lback? 
*/ if (ipv6_addr_any(&np->rcv_saddr)) ipv6_addr_copy(&sin->sin6_addr, &np->saddr); else diff -Nur kernel-2.6.32.54/linux-2.6.32/net/ipv6/fib6_rules.c kernel-2.6.32.54.vs/linux-2.6.32/net/ipv6/fib6_rules.c --- kernel-2.6.32.54/linux-2.6.32/net/ipv6/fib6_rules.c 2009-12-03 04:51:21.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/net/ipv6/fib6_rules.c 2012-01-16 14:51:22.109408029 +0100 @@ -96,7 +96,7 @@ if (ipv6_dev_get_saddr(net, ip6_dst_idev(&rt->u.dst)->dev, &flp->fl6_dst, srcprefs, - &saddr)) + &saddr, NULL)) goto again; if (!ipv6_prefix_equal(&saddr, &r->src.addr, r->src.plen)) diff -Nur kernel-2.6.32.54/linux-2.6.32/net/ipv6/inet6_hashtables.c kernel-2.6.32.54.vs/linux-2.6.32/net/ipv6/inet6_hashtables.c --- kernel-2.6.32.54/linux-2.6.32/net/ipv6/inet6_hashtables.c 2012-01-16 15:01:40.076724853 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/net/ipv6/inet6_hashtables.c 2012-01-16 14:51:22.109408029 +0100 @@ -16,6 +16,7 @@ #include #include +#include #include #include @@ -77,7 +78,6 @@ unsigned int slot = hash & (hashinfo->ehash_size - 1); struct inet_ehash_bucket *head = &hashinfo->ehash[slot]; - rcu_read_lock(); begin: sk_nulls_for_each_rcu(sk, node, &head->chain) { @@ -89,7 +89,7 @@ sock_put(sk); goto begin; } - goto out; + goto out; } } if (get_nulls_value(node) != slot) @@ -135,6 +135,9 @@ if (!ipv6_addr_equal(&np->rcv_saddr, daddr)) return -1; score++; + } else { + if (!v6_addr_in_nx_info(sk->sk_nx_info, daddr, -1)) + return -1; } if (sk->sk_bound_dev_if) { if (sk->sk_bound_dev_if != dif) diff -Nur kernel-2.6.32.54/linux-2.6.32/net/ipv6/ip6_output.c kernel-2.6.32.54.vs/linux-2.6.32/net/ipv6/ip6_output.c --- kernel-2.6.32.54/linux-2.6.32/net/ipv6/ip6_output.c 2012-01-16 15:01:40.076724853 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/net/ipv6/ip6_output.c 2012-01-16 14:51:22.113408015 +0100 @@ -942,7 +942,7 @@ err = ipv6_dev_get_saddr(net, ip6_dst_idev(*dst)->dev, &fl->fl6_dst, sk ? inet6_sk(sk)->srcprefs : 0, - &fl->fl6_src); + &fl->fl6_src, sk->sk_nx_info); if (err) goto out_err_release; } diff -Nur kernel-2.6.32.54/linux-2.6.32/net/ipv6/Kconfig kernel-2.6.32.54.vs/linux-2.6.32/net/ipv6/Kconfig --- kernel-2.6.32.54/linux-2.6.32/net/ipv6/Kconfig 2009-12-03 04:51:21.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/net/ipv6/Kconfig 2012-01-16 14:51:22.105408043 +0100 @@ -4,8 +4,8 @@ # IPv6 as module will cause a CRASH if you try to unload it menuconfig IPV6 - tristate "The IPv6 protocol" - default m + bool "The IPv6 protocol" + default n ---help--- This is complemental support for the IP version 6. You will still be able to do traditional IPv4 networking as well. diff -Nur kernel-2.6.32.54/linux-2.6.32/net/ipv6/ndisc.c kernel-2.6.32.54.vs/linux-2.6.32/net/ipv6/ndisc.c --- kernel-2.6.32.54/linux-2.6.32/net/ipv6/ndisc.c 2012-01-16 15:01:40.364723834 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/net/ipv6/ndisc.c 2012-01-16 14:51:22.113408015 +0100 @@ -591,7 +591,7 @@ } else { if (ipv6_dev_get_saddr(dev_net(dev), dev, daddr, inet6_sk(dev_net(dev)->ipv6.ndisc_sk)->srcprefs, - &tmpaddr)) + &tmpaddr, NULL /* FIXME: ? 
*/ )) return; src_addr = &tmpaddr; } diff -Nur kernel-2.6.32.54/linux-2.6.32/net/ipv6/ndisc.c.orig kernel-2.6.32.54.vs/linux-2.6.32/net/ipv6/ndisc.c.orig --- kernel-2.6.32.54/linux-2.6.32/net/ipv6/ndisc.c.orig 2009-12-03 04:51:21.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/net/ipv6/ndisc.c.orig 2012-01-16 14:47:19.554254579 +0100 @@ -157,6 +157,8 @@ .gc_thresh3 = 1024, }; +EXPORT_SYMBOL(nd_tbl); + /* ND options */ struct ndisc_options { struct nd_opt_hdr *nd_opt_array[__ND_OPT_ARRAY_MAX]; diff -Nur kernel-2.6.32.54/linux-2.6.32/net/ipv6/raw.c kernel-2.6.32.54.vs/linux-2.6.32/net/ipv6/raw.c --- kernel-2.6.32.54/linux-2.6.32/net/ipv6/raw.c 2009-12-03 04:51:21.000000000 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/net/ipv6/raw.c 2012-01-16 14:51:22.113408015 +0100 @@ -29,6 +29,7 @@ #include #include #include +#include #include #include #include @@ -281,6 +282,13 @@ } } + if (!v6_addr_in_nx_info(sk->sk_nx_info, &addr->sin6_addr, -1)) { + err = -EADDRNOTAVAIL; + if (dev) + dev_put(dev); + goto out; + } + /* ipv4 addr of the socket is invalid. Only the * unspecified and mapped address have a v4 equivalent. */ diff -Nur kernel-2.6.32.54/linux-2.6.32/net/ipv6/route.c kernel-2.6.32.54.vs/linux-2.6.32/net/ipv6/route.c --- kernel-2.6.32.54/linux-2.6.32/net/ipv6/route.c 2012-01-16 15:01:40.084724825 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/net/ipv6/route.c 2012-01-16 14:51:22.113408015 +0100 @@ -2277,7 +2277,8 @@ struct inet6_dev *idev = ip6_dst_idev(&rt->u.dst); struct in6_addr saddr_buf; if (ipv6_dev_get_saddr(net, idev ? idev->dev : NULL, - dst, 0, &saddr_buf) == 0) + dst, 0, &saddr_buf, + (skb->sk ? skb->sk->sk_nx_info : NULL)) == 0) NLA_PUT(skb, RTA_PREFSRC, 16, &saddr_buf); } diff -Nur kernel-2.6.32.54/linux-2.6.32/net/ipv6/tcp_ipv6.c kernel-2.6.32.54.vs/linux-2.6.32/net/ipv6/tcp_ipv6.c --- kernel-2.6.32.54/linux-2.6.32/net/ipv6/tcp_ipv6.c 2012-01-16 15:01:40.084724825 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/net/ipv6/tcp_ipv6.c 2012-01-16 14:51:22.113408015 +0100 @@ -69,6 +69,7 @@ #include #include +#include static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb); static void tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb, @@ -157,8 +158,15 @@ * connect() to INADDR_ANY means loopback (BSD'ism). */ - if(ipv6_addr_any(&usin->sin6_addr)) - usin->sin6_addr.s6_addr[15] = 0x1; + if(ipv6_addr_any(&usin->sin6_addr)) { + struct nx_info *nxi = sk->sk_nx_info; + + if (nxi && nx_info_has_v6(nxi)) + /* FIXME: remap lback? */ + usin->sin6_addr = nxi->v6.ip; + else + usin->sin6_addr.s6_addr[15] = 0x1; + } addr_type = ipv6_addr_type(&usin->sin6_addr); diff -Nur kernel-2.6.32.54/linux-2.6.32/net/ipv6/udp.c kernel-2.6.32.54.vs/linux-2.6.32/net/ipv6/udp.c --- kernel-2.6.32.54/linux-2.6.32/net/ipv6/udp.c 2012-01-16 15:01:40.084724825 +0100 +++ kernel-2.6.32.54.vs/linux-2.6.32/net/ipv6/udp.c 2012-01-16 14:51:22.113408015 +0100 @@ -47,6 +47,7 @@ #include #include +#include #include "udp_impl.h" int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2) @@ -61,24 +62,49 @@ int addr_type2 = sk2_rcv_saddr6 ? 
diff -Nur kernel-2.6.32.54/linux-2.6.32/net/ipv6/udp.c kernel-2.6.32.54.vs/linux-2.6.32/net/ipv6/udp.c
--- kernel-2.6.32.54/linux-2.6.32/net/ipv6/udp.c	2012-01-16 15:01:40.084724825 +0100
+++ kernel-2.6.32.54.vs/linux-2.6.32/net/ipv6/udp.c	2012-01-16 14:51:22.113408015 +0100
@@ -47,6 +47,7 @@
 #include
 #include
+#include
 #include "udp_impl.h"
 
 int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
@@ -61,24 +62,49 @@
 	int addr_type2 = sk2_rcv_saddr6 ?
 			ipv6_addr_type(sk2_rcv_saddr6) : IPV6_ADDR_MAPPED;
 
 	/* if both are mapped, treat as IPv4 */
-	if (addr_type == IPV6_ADDR_MAPPED && addr_type2 == IPV6_ADDR_MAPPED)
-		return (!sk2_ipv6only &&
+	if (addr_type == IPV6_ADDR_MAPPED && addr_type2 == IPV6_ADDR_MAPPED) {
+		if (!sk2_ipv6only &&
 			(!sk_rcv_saddr || !sk2_rcv_saddr ||
-			  sk_rcv_saddr == sk2_rcv_saddr));
+			  sk_rcv_saddr == sk2_rcv_saddr))
+			goto vs_v4;
+		else
+			return 0;
+	}
 
 	if (addr_type2 == IPV6_ADDR_ANY &&
 	    !(sk2_ipv6only && addr_type == IPV6_ADDR_MAPPED))
-		return 1;
+		goto vs;
 
 	if (addr_type == IPV6_ADDR_ANY &&
 	    !(sk_ipv6only && addr_type2 == IPV6_ADDR_MAPPED))
-		return 1;
+		goto vs;
 
 	if (sk2_rcv_saddr6 &&
 	    ipv6_addr_equal(sk_rcv_saddr6, sk2_rcv_saddr6))
-		return 1;
+		goto vs;
 
 	return 0;
+
+vs_v4:
+	if (!sk_rcv_saddr && !sk2_rcv_saddr)
+		return nx_v4_addr_conflict(sk->sk_nx_info, sk2->sk_nx_info);
+	if (!sk2_rcv_saddr)
+		return v4_addr_in_nx_info(sk->sk_nx_info, sk2_rcv_saddr, -1);
+	if (!sk_rcv_saddr)
+		return v4_addr_in_nx_info(sk2->sk_nx_info, sk_rcv_saddr, -1);
+	return 1;
+vs:
+	if (addr_type2 == IPV6_ADDR_ANY && addr_type == IPV6_ADDR_ANY)
+		return nx_v6_addr_conflict(sk->sk_nx_info, sk2->sk_nx_info);
+	else if (addr_type2 == IPV6_ADDR_ANY)
+		return v6_addr_in_nx_info(sk2->sk_nx_info, sk_rcv_saddr6, -1);
+	else if (addr_type == IPV6_ADDR_ANY) {
+		if (addr_type2 == IPV6_ADDR_MAPPED)
+			return nx_v4_addr_conflict(sk->sk_nx_info, sk2->sk_nx_info);
+		else
+			return v6_addr_in_nx_info(sk->sk_nx_info, sk2_rcv_saddr6, -1);
+	}
+	return 1;
 }
 
 int udp_v6_get_port(struct sock *sk, unsigned short snum)
@@ -109,6 +135,10 @@
 		if (!ipv6_addr_equal(&np->rcv_saddr, daddr))
 			return -1;
 		score++;
+	} else {
+		/* block non nx_info ips */
+		if (!v6_addr_in_nx_info(sk->sk_nx_info, daddr, -1))
+			return -1;
 	}
 	if (!ipv6_addr_any(&np->daddr)) {
 		if (!ipv6_addr_equal(&np->daddr, saddr))
diff -Nur kernel-2.6.32.54/linux-2.6.32/net/ipv6/xfrm6_policy.c kernel-2.6.32.54.vs/linux-2.6.32/net/ipv6/xfrm6_policy.c
--- kernel-2.6.32.54/linux-2.6.32/net/ipv6/xfrm6_policy.c	2009-12-03 04:51:21.000000000 +0100
+++ kernel-2.6.32.54.vs/linux-2.6.32/net/ipv6/xfrm6_policy.c	2012-01-16 14:51:22.113408015 +0100
@@ -63,7 +63,7 @@
 	dev = ip6_dst_idev(dst)->dev;
 	ipv6_dev_get_saddr(dev_net(dev), dev,
 			   (struct in6_addr *)&daddr->a6, 0,
-			   (struct in6_addr *)&saddr->a6);
+			   (struct in6_addr *)&saddr->a6, NULL);
 	dst_release(dst);
 	return 0;
 }
diff -Nur kernel-2.6.32.54/linux-2.6.32/net/netlink/af_netlink.c kernel-2.6.32.54.vs/linux-2.6.32/net/netlink/af_netlink.c
--- kernel-2.6.32.54/linux-2.6.32/net/netlink/af_netlink.c	2012-01-16 15:01:40.136724641 +0100
+++ kernel-2.6.32.54.vs/linux-2.6.32/net/netlink/af_netlink.c	2012-01-16 14:51:22.121407987 +0100
@@ -55,6 +55,9 @@
 #include
 #include
 #include
+#include
+#include
+#include
 
 #include
 #include
@@ -1885,6 +1888,8 @@
 		sk_for_each(s, node, &hash->table[j]) {
 			if (sock_net(s) != seq_file_net(seq))
 				continue;
+			if (!nx_check(s->sk_nid, VS_WATCH_P | VS_IDENT))
+				continue;
 			if (off == pos) {
 				iter->link = i;
 				iter->hash_idx = j;
@@ -1919,7 +1924,8 @@
 	s = v;
 	do {
 		s = sk_next(s);
-	} while (s && sock_net(s) != seq_file_net(seq));
+	} while (s && (sock_net(s) != seq_file_net(seq) ||
+		!nx_check(s->sk_nid, VS_WATCH_P | VS_IDENT)));
 	if (s)
 		return s;
@@ -1931,7 +1937,8 @@
 	for (; j <= hash->mask; j++) {
 		s = sk_head(&hash->table[j]);
-		while (s && sock_net(s) != seq_file_net(seq))
+		while (s && (sock_net(s) != seq_file_net(seq) ||
+			!nx_check(s->sk_nid, VS_WATCH_P | VS_IDENT)))
 			s = sk_next(s);
 		if (s) {
 			iter->link = i;
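The ipv6_rcv_saddr_equal() rewrite above replaces hard "return 1" conflicts with fallthrough labels (vs, vs_v4) that consult the network contexts: two wildcard binds only collide when the owning contexts actually share an address. Below is a simplified compilable model of the v4 wildcard cases; nx_like, ctx_conflict, addr_in_ctx and rcv_saddr_equal are assumed stand-in names, and the argument handling is simplified relative to the patch.

/* Sketch only: models the vs_v4 fallthrough logic added above.
 * Wildcard binds in disjoint contexts do not conflict. */
#include <stdio.h>

struct nx_like { unsigned addrs[4]; int n; };  /* v4 addrs, host order */

static int addr_in_ctx(const struct nx_like *nxi, unsigned a)
{
	int i;
	if (!nxi)
		return 1;             /* host context sees everything */
	for (i = 0; i < nxi->n; i++)
		if (nxi->addrs[i] == a)
			return 1;
	return 0;
}

/* stand-in for nx_v4_addr_conflict(): do the two contexts overlap? */
static int ctx_conflict(const struct nx_like *a, const struct nx_like *b)
{
	int i;
	if (!a || !b)
		return 1;
	for (i = 0; i < a->n; i++)
		if (addr_in_ctx(b, a->addrs[i]))
			return 1;
	return 0;
}

/* simplified bind-conflict test: saddr 0 means a wildcard bind */
static int rcv_saddr_equal(unsigned s1, const struct nx_like *n1,
			   unsigned s2, const struct nx_like *n2)
{
	if (!s1 && !s2)
		return ctx_conflict(n1, n2);
	if (!s2)
		return addr_in_ctx(n2, s1);
	if (!s1)
		return addr_in_ctx(n1, s2);
	return s1 == s2;
}

int main(void)
{
	struct nx_like g1 = { { 0x0a000001 }, 1 };   /* 10.0.0.1 */
	struct nx_like g2 = { { 0x0a000002 }, 1 };   /* 10.0.0.2 */

	printf("wildcard vs wildcard, disjoint guests: %d\n",
	       rcv_saddr_equal(0, &g1, 0, &g2));     /* 0: no conflict */
	printf("wildcard vs wildcard, same guest:     %d\n",
	       rcv_saddr_equal(0, &g1, 0, &g1));     /* 1: conflict */
	return 0;
}

The real patch handles the v6 wildcard cases (the vs label) the same way, via nx_v6_addr_conflict() and v6_addr_in_nx_info(); the af_netlink.c hunks apply the companion idea to /proc visibility, hiding sockets whose nid fails the nx_check().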
kernel-2.6.32.54/linux-2.6.32/net/sctp/ipv6.c kernel-2.6.32.54.vs/linux-2.6.32/net/sctp/ipv6.c
--- kernel-2.6.32.54/linux-2.6.32/net/sctp/ipv6.c	2009-12-03 04:51:21.000000000 +0100
+++ kernel-2.6.32.54.vs/linux-2.6.32/net/sctp/ipv6.c	2012-01-16 14:51:22.121407987 +0100
@@ -316,7 +316,8 @@
 			   dst ? ip6_dst_idev(dst)->dev : NULL,
 			   &daddr->v6.sin6_addr,
 			   inet6_sk(&sk->inet.sk)->srcprefs,
-			   &saddr->v6.sin6_addr);
+			   &saddr->v6.sin6_addr,
+			   asoc->base.sk->sk_nx_info);
 	SCTP_DEBUG_PRINTK("saddr from ipv6_get_saddr: %pI6\n",
 			  &saddr->v6.sin6_addr);
 	return;
diff -Nur kernel-2.6.32.54/linux-2.6.32/net/socket.c kernel-2.6.32.54.vs/linux-2.6.32/net/socket.c
--- kernel-2.6.32.54/linux-2.6.32/net/socket.c	2012-01-16 15:01:40.144724612 +0100
+++ kernel-2.6.32.54.vs/linux-2.6.32/net/socket.c	2012-01-16 14:51:22.125407973 +0100
@@ -96,6 +96,10 @@
 #include
 #include
+#include
+#include
+#include
+#include
 
 static int sock_no_open(struct inode *irrelevant, struct file *dontcare);
 static ssize_t sock_aio_read(struct kiocb *iocb, const struct iovec *iov,
@@ -559,7 +563,7 @@
 				 struct msghdr *msg, size_t size)
 {
 	struct sock_iocb *si = kiocb_to_siocb(iocb);
-	int err;
+	int err, len;
 
 	si->sock = sock;
 	si->scm = NULL;
@@ -570,7 +574,22 @@
 	if (err)
 		return err;
 
-	return sock->ops->sendmsg(iocb, sock, msg, size);
+	len = sock->ops->sendmsg(iocb, sock, msg, size);
+	if (sock->sk) {
+		if (len == size)
+			vx_sock_send(sock->sk, size);
+		else
+			vx_sock_fail(sock->sk, size);
+	}
+	vxdprintk(VXD_CBIT(net, 7),
+		"__sock_sendmsg: %p[%p,%p,%p;%d/%d]:%d/%d",
+		sock, sock->sk,
+		(sock->sk)?sock->sk->sk_nx_info:0,
+		(sock->sk)?sock->sk->sk_vx_info:0,
+		(sock->sk)?sock->sk->sk_xid:0,
+		(sock->sk)?sock->sk->sk_nid:0,
+		(unsigned int)size, len);
+	return len;
 }
 
 int sock_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
@@ -671,7 +690,7 @@
 static inline int __sock_recvmsg(struct kiocb *iocb, struct socket *sock,
 				 struct msghdr *msg, size_t size, int flags)
 {
-	int err;
+	int err, len;
 	struct sock_iocb *si = kiocb_to_siocb(iocb);
 
 	si->sock = sock;
@@ -684,7 +703,18 @@
 	if (err)
 		return err;
 
-	return sock->ops->recvmsg(iocb, sock, msg, size, flags);
+	len = sock->ops->recvmsg(iocb, sock, msg, size, flags);
+	if ((len >= 0) && sock->sk)
+		vx_sock_recv(sock->sk, len);
+	vxdprintk(VXD_CBIT(net, 7),
+		"__sock_recvmsg: %p[%p,%p,%p;%d/%d]:%d/%d",
+		sock, sock->sk,
+		(sock->sk)?sock->sk->sk_nx_info:0,
+		(sock->sk)?sock->sk->sk_vx_info:0,
+		(sock->sk)?sock->sk->sk_xid:0,
+		(sock->sk)?sock->sk->sk_nid:0,
+		(unsigned int)size, len);
+	return len;
 }
 
 int sock_recvmsg(struct socket *sock, struct msghdr *msg,
@@ -1155,6 +1185,13 @@
 	if (type < 0 || type >= SOCK_MAX)
 		return -EINVAL;
 
+	if (!nx_check(0, VS_ADMIN)) {
+		if (family == PF_INET && !current_nx_info_has_v4())
+			return -EAFNOSUPPORT;
+		if (family == PF_INET6 && !current_nx_info_has_v6())
+			return -EAFNOSUPPORT;
+	}
+
 	/* Compatibility.
 
 	   This uglymoron is moved from INET layer to here to avoid
@@ -1287,6 +1324,7 @@
 	if (retval < 0)
 		goto out;
 
+	set_bit(SOCK_USER_SOCKET, &sock->flags);
 	retval = sock_map_fd(sock, flags & (O_CLOEXEC | O_NONBLOCK));
 	if (retval < 0)
 		goto out_release;
@@ -1328,10 +1366,12 @@
 	err = sock_create(family, type, protocol, &sock1);
 	if (err < 0)
 		goto out;
+	set_bit(SOCK_USER_SOCKET, &sock1->flags);
 
 	err = sock_create(family, type, protocol, &sock2);
 	if (err < 0)
 		goto out_release_1;
+	set_bit(SOCK_USER_SOCKET, &sock2->flags);
 
 	err = sock1->ops->socketpair(sock1, sock2);
 	if (err < 0)
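Beyond gating socket creation on the context's address families, socket.c wraps the send and receive paths with per-context traffic accounting: vx_sock_send()/vx_sock_fail() on the send side and vx_sock_recv() on the receive side. Here is a compilable sketch of the bookkeeping those helpers imply; sock_acct and the acct_* names are stand-ins, not the patch's API.

/* Sketch only: models the accounting added to __sock_sendmsg() and
 * __sock_recvmsg() above. Failed or short sends are tracked apart
 * from successful ones. All names are stand-ins. */
#include <stdio.h>

struct sock_acct {
	unsigned long tx_msgs, tx_bytes;
	unsigned long rx_msgs, rx_bytes;
	unsigned long fail_msgs, fail_bytes;
};

static void acct_send(struct sock_acct *a, long size, long ret)
{
	if (ret == size) {            /* fully sent: vx_sock_send() */
		a->tx_msgs++;
		a->tx_bytes += size;
	} else {                      /* short/failed: vx_sock_fail() */
		a->fail_msgs++;
		a->fail_bytes += size;
	}
}

static void acct_recv(struct sock_acct *a, long ret)
{
	if (ret >= 0) {               /* vx_sock_recv() */
		a->rx_msgs++;
		a->rx_bytes += ret;
	}
}

int main(void)
{
	struct sock_acct a = { 0 };

	acct_send(&a, 512, 512);      /* complete send */
	acct_send(&a, 512, -11);      /* e.g. -EAGAIN */
	acct_recv(&a, 128);

	printf("tx %lu/%lu  fail %lu/%lu  rx %lu/%lu\n",
	       a.tx_msgs, a.tx_bytes, a.fail_msgs, a.fail_bytes,
	       a.rx_msgs, a.rx_bytes);
	return 0;
}

This is what feeds the "__sock_sendmsg"/"__sock_recvmsg" debug_net messages listed at the top of this document.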
diff -Nur kernel-2.6.32.54/linux-2.6.32/net/sunrpc/auth.c kernel-2.6.32.54.vs/linux-2.6.32/net/sunrpc/auth.c
--- kernel-2.6.32.54/linux-2.6.32/net/sunrpc/auth.c	2009-12-03 04:51:21.000000000 +0100
+++ kernel-2.6.32.54.vs/linux-2.6.32/net/sunrpc/auth.c	2012-01-16 14:51:22.125407973 +0100
@@ -14,6 +14,7 @@
 #include
 #include
 #include
+#include
 
 #ifdef RPC_DEBUG
 # define RPCDBG_FACILITY	RPCDBG_AUTH
@@ -360,6 +361,7 @@
 	memset(&acred, 0, sizeof(acred));
 	acred.uid = cred->fsuid;
 	acred.gid = cred->fsgid;
+	acred.tag = dx_current_tag();
 	acred.group_info = get_group_info(((struct cred *)cred)->group_info);
 
 	ret = auth->au_ops->lookup_cred(auth, &acred, flags);
@@ -400,6 +402,7 @@
 	struct auth_cred acred = {
 		.uid = 0,
 		.gid = 0,
+		.tag = dx_current_tag(),
 	};
 	struct rpc_cred *ret;
diff -Nur kernel-2.6.32.54/linux-2.6.32/net/sunrpc/auth_unix.c kernel-2.6.32.54.vs/linux-2.6.32/net/sunrpc/auth_unix.c
--- kernel-2.6.32.54/linux-2.6.32/net/sunrpc/auth_unix.c	2012-01-16 15:01:40.156724570 +0100
+++ kernel-2.6.32.54.vs/linux-2.6.32/net/sunrpc/auth_unix.c	2012-01-16 14:51:22.125407973 +0100
@@ -11,12 +11,14 @@
 #include
 #include
 #include
+#include
 
 #define NFS_NGROUPS	16
 
 struct unx_cred {
 	struct rpc_cred		uc_base;
 	gid_t			uc_gid;
+	tag_t			uc_tag;
 	gid_t			uc_gids[NFS_NGROUPS];
 };
 #define uc_uid			uc_base.cr_uid
@@ -78,6 +80,7 @@
 		groups = NFS_NGROUPS;
 
 	cred->uc_gid = acred->gid;
+	cred->uc_tag = acred->tag;
 	for (i = 0; i < groups; i++)
 		cred->uc_gids[i] = GROUP_AT(acred->group_info, i);
 	if (i < NFS_NGROUPS)
@@ -119,7 +122,9 @@
 	unsigned int i;
 
-	if (cred->uc_uid != acred->uid || cred->uc_gid != acred->gid)
+	if (cred->uc_uid != acred->uid ||
+	    cred->uc_gid != acred->gid ||
+	    cred->uc_tag != acred->tag)
 		return 0;
 
 	if (acred->group_info != NULL)
@@ -145,7 +150,7 @@
 	struct rpc_clnt	*clnt = task->tk_client;
 	struct unx_cred	*cred = container_of(task->tk_msg.rpc_cred, struct unx_cred, uc_base);
 	__be32		*base, *hold;
-	int		i;
+	int		i, tag;
 
 	*p++ = htonl(RPC_AUTH_UNIX);
 	base = p++;
@@ -155,9 +160,12 @@
 	 * Copy the UTS nodename captured when the client was created.
 	 */
 	p = xdr_encode_array(p, clnt->cl_nodename, clnt->cl_nodelen);
+	tag = task->tk_client->cl_tag;
 
-	*p++ = htonl((u32) cred->uc_uid);
-	*p++ = htonl((u32) cred->uc_gid);
+	*p++ = htonl((u32) TAGINO_UID(tag,
+		cred->uc_uid, cred->uc_tag));
+	*p++ = htonl((u32) TAGINO_GID(tag,
+		cred->uc_gid, cred->uc_tag));
 	hold = p++;
 	for (i = 0; i < 16 && cred->uc_gids[i] != (gid_t) NOGROUP; i++)
 		*p++ = htonl((u32) cred->uc_gids[i]);
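The AUTH_UNIX change above stamps RPC credentials with the current context tag and, when the client is tagged (cl_tag), folds it into the uid/gid encoded on the wire via TAGINO_UID()/TAGINO_GID(). The sketch below shows one plausible encoding; the 16/16 bit split and the tagino_id name are assumptions for illustration, not the kernel's actual layout.

/* Sketch only: one possible TAGINO-style id encoding. The bit split
 * is an assumed example, not the patch's fixed layout. */
#include <stdio.h>
#include <stdint.h>

#define TAG_SHIFT 16
#define ID_MASK   ((1u << TAG_SHIFT) - 1)

/* stand-in for TAGINO_UID(cond, uid, tag): untagged clients pass the
 * id through unchanged, tagged clients carry the tag in high bits */
static uint32_t tagino_id(int tagged, uint32_t id, uint32_t tag)
{
	if (!tagged)
		return id;
	return (tag << TAG_SHIFT) | (id & ID_MASK);
}

int main(void)
{
	uint32_t uid = 1000, tag = 42;

	printf("untagged: %u\n", tagino_id(0, uid, tag));
	printf("tagged:   0x%08x\n", tagino_id(1, uid, tag));
	return 0;
}

The point of the uc_tag field and the extra rpc_cred match above is that two guests with the same numeric uid must not share a cached RPC credential.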
diff -Nur kernel-2.6.32.54/linux-2.6.32/net/sunrpc/clnt.c kernel-2.6.32.54.vs/linux-2.6.32/net/sunrpc/clnt.c
--- kernel-2.6.32.54/linux-2.6.32/net/sunrpc/clnt.c	2012-01-16 15:01:40.156724570 +0100
+++ kernel-2.6.32.54.vs/linux-2.6.32/net/sunrpc/clnt.c	2012-01-16 14:51:22.125407973 +0100
@@ -33,6 +33,7 @@
 #include
 #include
 #include
+#include
 
 #include
 #include
@@ -358,6 +359,9 @@
 	if (!(args->flags & RPC_CLNT_CREATE_QUIET))
 		clnt->cl_chatty = 1;
 
+	/* TODO: handle RPC_CLNT_CREATE_TAGGED
+	if (args->flags & RPC_CLNT_CREATE_TAGGED)
+		clnt->cl_tag = 1; */
 	return clnt;
 }
 EXPORT_SYMBOL_GPL(rpc_create);
diff -Nur kernel-2.6.32.54/linux-2.6.32/net/unix/af_unix.c kernel-2.6.32.54.vs/linux-2.6.32/net/unix/af_unix.c
--- kernel-2.6.32.54/linux-2.6.32/net/unix/af_unix.c	2012-01-16 15:01:40.164724542 +0100
+++ kernel-2.6.32.54.vs/linux-2.6.32/net/unix/af_unix.c	2012-01-16 14:51:22.125407973 +0100
@@ -114,6 +114,8 @@
 #include
 #include
 #include
+#include
+#include
 
 static struct hlist_head unix_socket_table[UNIX_HASH_SIZE + 1];
 static DEFINE_SPINLOCK(unix_table_lock);
@@ -258,6 +260,8 @@
 		if (!net_eq(sock_net(s), net))
 			continue;
+		if (!nx_check(s->sk_nid, VS_WATCH_P | VS_IDENT))
+			continue;
 		if (u->addr->len == len &&
 		    !memcmp(u->addr->name, sunname, len))
 			goto found;
@@ -2164,6 +2168,8 @@
 	for (s = first_unix_socket(&iter->i); s; s = next_unix_socket(&iter->i, s)) {
 		if (sock_net(s) != seq_file_net(seq))
 			continue;
+		if (!nx_check(s->sk_nid, VS_WATCH_P | VS_IDENT))
+			continue;
 		if (off == pos)
 			return s;
 		++off;
@@ -2188,7 +2194,8 @@
 		sk = first_unix_socket(&iter->i);
 	else
 		sk = next_unix_socket(&iter->i, sk);
-	while (sk && (sock_net(sk) != seq_file_net(seq)))
+	while (sk && (sock_net(sk) != seq_file_net(seq) ||
+		!nx_check(sk->sk_nid, VS_WATCH_P | VS_IDENT)))
 		sk = next_unix_socket(&iter->i, sk);
 	return sk;
 }
diff -Nur kernel-2.6.32.54/linux-2.6.32/scripts/checksyscalls.sh kernel-2.6.32.54.vs/linux-2.6.32/scripts/checksyscalls.sh
--- kernel-2.6.32.54/linux-2.6.32/scripts/checksyscalls.sh	2009-12-03 04:51:21.000000000 +0100
+++ kernel-2.6.32.54.vs/linux-2.6.32/scripts/checksyscalls.sh	2012-01-16 14:51:22.125407973 +0100
@@ -194,7 +194,6 @@
 #define __IGNORE_afs_syscall
 #define __IGNORE_getpmsg
 #define __IGNORE_putpmsg
-#define __IGNORE_vserver
 EOF
 }
diff -Nur kernel-2.6.32.54/linux-2.6.32/security/commoncap.c kernel-2.6.32.54.vs/linux-2.6.32/security/commoncap.c
--- kernel-2.6.32.54/linux-2.6.32/security/commoncap.c	2009-12-03 04:51:21.000000000 +0100
+++ kernel-2.6.32.54.vs/linux-2.6.32/security/commoncap.c	2012-01-16 14:51:22.129407959 +0100
@@ -27,6 +27,7 @@
 #include
 #include
 #include
+#include
 
 /*
  * If a non-root user executes a setuid-root binary in
@@ -52,7 +53,7 @@
 
 int cap_netlink_send(struct sock *sk, struct sk_buff *skb)
 {
-	NETLINK_CB(skb).eff_cap = current_cap();
+	NETLINK_CB(skb).eff_cap = vx_mbcaps(current_cap());
 	return 0;
 }
 
@@ -62,6 +63,7 @@
 		return -EPERM;
 	return 0;
 }
+
 EXPORT_SYMBOL(cap_netlink_recv);
 
 /**
@@ -82,7 +84,22 @@
 int cap_capable(struct task_struct *tsk, const struct cred *cred, int cap,
 		int audit)
 {
-	return cap_raised(cred->cap_effective, cap) ? 0 : -EPERM;
+	struct vx_info *vxi = tsk->vx_info;
+
+#if 0
+	printk("cap_capable() VXF_STATE_SETUP = %llx, raised = %x, eff = %08x:%08x\n",
+		vx_info_flags(vxi, VXF_STATE_SETUP, 0),
+		cap_raised(tsk->cap_effective, cap),
+		tsk->cap_effective.cap[1], tsk->cap_effective.cap[0]);
+#endif
+
+	/* special case SETUP */
+	if (vx_info_flags(vxi, VXF_STATE_SETUP, 0) &&
+		/* FIXME: maybe use cred instead? */
+		cap_raised(tsk->cred->cap_effective, cap))
+		return 0;
+
+	return vx_cap_raised(vxi, cred->cap_effective, cap) ? 0 : -EPERM;
 }
 
 /**
@@ -618,7 +635,7 @@
 
 	if (!strncmp(name, XATTR_SECURITY_PREFIX,
 		     sizeof(XATTR_SECURITY_PREFIX) - 1) &&
-	    !capable(CAP_SYS_ADMIN))
+	    !vx_capable(CAP_SYS_ADMIN, VXC_FS_SECURITY))
 		return -EPERM;
 	return 0;
 }
@@ -644,7 +661,7 @@
 
 	if (!strncmp(name, XATTR_SECURITY_PREFIX,
 		     sizeof(XATTR_SECURITY_PREFIX) - 1) &&
-	    !capable(CAP_SYS_ADMIN))
+	    !vx_capable(CAP_SYS_ADMIN, VXC_FS_SECURITY))
 		return -EPERM;
 	return 0;
 }
@@ -962,7 +979,8 @@
  */
 int cap_syslog(int type)
 {
-	if ((type != 3 && type != 10) && !capable(CAP_SYS_ADMIN))
+	if ((type != 3 && type != 10) &&
+		!vx_capable(CAP_SYS_ADMIN, VXC_SYSLOG))
 		return -EPERM;
 	return 0;
 }
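commoncap.c above is the capability side of the patch: cap_capable() now filters the effective set through the context's capabilities (vx_cap_raised), with a bypass while the context is still in VXF_STATE_SETUP, and a few capable(CAP_SYS_ADMIN) checks become vx_capable() with a context capability. A toy model of that masking, with stand-in names (vx_like, ctx_capable) and an assumed simple 64-bit mask:

/* Sketch only: models the cap_capable() change. A guest can only use
 * capabilities its context was granted. All names are stand-ins. */
#include <stdio.h>
#include <stdint.h>

#define CAP_NET_ADMIN   12
#define CAP_SYS_ADMIN   21

struct vx_like {                 /* stand-in for struct vx_info */
	uint64_t bcaps;          /* capabilities granted to the context */
	int setup;               /* stand-in for VXF_STATE_SETUP */
};

static int cap_raised64(uint64_t set, int cap)
{
	return (int)((set >> cap) & 1);
}

/* stand-in for the vx_cap_raised() path: context mask applied on top
 * of the task's effective set; NULL context means the host */
static int ctx_capable(const struct vx_like *vxi, uint64_t eff, int cap)
{
	if (vxi && vxi->setup)   /* setup phase: unrestricted */
		return cap_raised64(eff, cap);
	if (vxi)
		eff &= vxi->bcaps;
	return cap_raised64(eff, cap);
}

int main(void)
{
	uint64_t eff = (1ull << CAP_SYS_ADMIN) | (1ull << CAP_NET_ADMIN);
	struct vx_like guest = { 1ull << CAP_NET_ADMIN, 0 };

	printf("host  SYS_ADMIN: %d\n", ctx_capable(NULL, eff, CAP_SYS_ADMIN));
	printf("guest SYS_ADMIN: %d\n", ctx_capable(&guest, eff, CAP_SYS_ADMIN));
	printf("guest NET_ADMIN: %d\n", ctx_capable(&guest, eff, CAP_NET_ADMIN));
	return 0;
}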
diff -Nur kernel-2.6.32.54/linux-2.6.32/security/selinux/hooks.c kernel-2.6.32.54.vs/linux-2.6.32/security/selinux/hooks.c
--- kernel-2.6.32.54/linux-2.6.32/security/selinux/hooks.c	2012-01-16 15:01:40.180724485 +0100
+++ kernel-2.6.32.54.vs/linux-2.6.32/security/selinux/hooks.c	2012-01-16 14:51:22.133407945 +0100
@@ -64,7 +64,6 @@
 #include
 #include
 #include	/* for Unix socket types */
-#include	/* for Unix socket types */
 #include
 #include
 #include
diff -Nur kernel-2.6.32.54/linux-2.6.32/security/selinux/include/av_permissions.h kernel-2.6.32.54.vs/linux-2.6.32/security/selinux/include/av_permissions.h
--- kernel-2.6.32.54/linux-2.6.32/security/selinux/include/av_permissions.h	2009-12-03 04:51:21.000000000 +0100
+++ kernel-2.6.32.54.vs/linux-2.6.32/security/selinux/include/av_permissions.h	2012-01-16 14:51:22.133407945 +0100
@@ -565,6 +565,7 @@
 #define CAPABILITY__SETFCAP		0x80000000UL
 #define CAPABILITY2__MAC_OVERRIDE	0x00000001UL
 #define CAPABILITY2__MAC_ADMIN		0x00000002UL
+#define CAPABILITY2__CONTEXT		0x00000004UL
 #define NETLINK_ROUTE_SOCKET__IOCTL	0x00000001UL
 #define NETLINK_ROUTE_SOCKET__READ	0x00000002UL
 #define NETLINK_ROUTE_SOCKET__WRITE	0x00000004UL
diff -Nur kernel-2.6.32.54/linux-2.6.32/security/selinux/include/av_perm_to_string.h kernel-2.6.32.54.vs/linux-2.6.32/security/selinux/include/av_perm_to_string.h
--- kernel-2.6.32.54/linux-2.6.32/security/selinux/include/av_perm_to_string.h	2009-12-03 04:51:21.000000000 +0100
+++ kernel-2.6.32.54.vs/linux-2.6.32/security/selinux/include/av_perm_to_string.h	2012-01-16 14:51:22.133407945 +0100
@@ -142,6 +142,7 @@
    S_(SECCLASS_CAPABILITY, CAPABILITY__SETFCAP, "setfcap")
    S_(SECCLASS_CAPABILITY2, CAPABILITY2__MAC_OVERRIDE, "mac_override")
    S_(SECCLASS_CAPABILITY2, CAPABILITY2__MAC_ADMIN, "mac_admin")
+   S_(SECCLASS_CAPABILITY2, CAPABILITY2__CONTEXT, "context")
    S_(SECCLASS_NETLINK_ROUTE_SOCKET, NETLINK_ROUTE_SOCKET__NLMSG_READ, "nlmsg_read")
    S_(SECCLASS_NETLINK_ROUTE_SOCKET, NETLINK_ROUTE_SOCKET__NLMSG_WRITE, "nlmsg_write")
    S_(SECCLASS_NETLINK_FIREWALL_SOCKET, NETLINK_FIREWALL_SOCKET__NLMSG_READ, "nlmsg_read")