diff -urN linux.orig/Documentation/Configure.help linux/Documentation/Configure.help --- linux.orig/Documentation/Configure.help Tue May 21 01:32:34 2002 +++ linux/Documentation/Configure.help Wed Aug 7 17:44:10 2002 @@ -2099,6 +2099,11 @@ "PPro/6X86MX" Select this for the Cyrix/IBM/National Semiconductor 6x86MX/MII and Intel Pentium II/Pentium Pro. + "K7" Select this for the AMD Athlon K7 CPUs with 64 bytes large L1 + cachelines. + "P4" Select this for the Intel Pentium4 CPUs with 128 bytes large L1 + cachelines. + If you don't know what to do, choose "386". 486 diff -urN linux.orig/arch/alpha/kernel/time.c linux/arch/alpha/kernel/time.c --- linux.orig/arch/alpha/kernel/time.c Sun Mar 25 18:31:46 2001 +++ linux/arch/alpha/kernel/time.c Wed Aug 7 17:53:48 2002 @@ -339,6 +339,20 @@ irq_handler = timer_interrupt; if (request_irq(TIMER_IRQ, irq_handler, 0, "timer", NULL)) panic("Could not allocate timer IRQ!"); + do_get_fast_time = do_gettimeofday; +} + +static inline void +timeval_normalize(struct timeval * tv) +{ + time_t __sec; + + __sec = tv->tv_usec / 1000000; + if (__sec) + { + tv->tv_usec %= 1000000; + tv->tv_sec += __sec; + } } /* @@ -389,13 +403,11 @@ #endif usec += delta_usec; - if (usec >= 1000000) { - sec += 1; - usec -= 1000000; - } tv->tv_sec = sec; tv->tv_usec = usec; + + timeval_normalize(tv); } void diff -urN linux.orig/arch/alpha/mm/fault.c linux/arch/alpha/mm/fault.c --- linux.orig/arch/alpha/mm/fault.c Sun Mar 25 18:37:29 2001 +++ linux/arch/alpha/mm/fault.c Wed Aug 7 17:57:18 2002 @@ -102,7 +102,7 @@ goto good_area; if (!(vma->vm_flags & VM_GROWSDOWN)) goto bad_area; - if (expand_stack(vma, address)) + if (expand_stack(vma, address, NULL)) goto bad_area; /* * Ok, we have a good vm_area for this memory access, so diff -urN linux.orig/arch/i386/Makefile linux/arch/i386/Makefile --- linux.orig/arch/i386/Makefile Sun Mar 25 18:31:45 2001 +++ linux/arch/i386/Makefile Wed Aug 7 17:44:10 2002 @@ -43,6 +43,14 @@ CFLAGS := $(CFLAGS) -m486 -malign-loops=2 -malign-jumps=2 -malign-functions=2 -DCPU=686 endif +ifdef CONFIG_M686_L1_64 +CFLAGS := $(CFLAGS) -m486 -malign-loops=2 -malign-jumps=2 -malign-functions=2 -DCPU=686 +endif + +ifdef CONFIG_M686_L1_128 +CFLAGS := $(CFLAGS) -m486 -malign-loops=2 -malign-jumps=2 -malign-functions=2 -DCPU=686 +endif + HEAD := arch/i386/kernel/head.o arch/i386/kernel/init_task.o SUBDIRS := $(SUBDIRS) arch/i386/kernel arch/i386/mm arch/i386/lib diff -urN linux.orig/arch/i386/config.in linux/arch/i386/config.in --- linux.orig/arch/i386/config.in Fri Nov 2 17:39:05 2001 +++ linux/arch/i386/config.in Wed Aug 7 17:44:10 2002 @@ -18,7 +18,9 @@ 486/Cx486 CONFIG_M486 \ 586/K5/5x86/6x86 CONFIG_M586 \ Pentium/K6/TSC/CyrixIII CONFIG_M586TSC \ - PPro/6x86MX CONFIG_M686" PPro + PPro/6x86MX CONFIG_M686 \ + K7 CONFIG_M686_L1_64 \ + P4 CONFIG_M686_L1_128" PPro # # Define implied options from the CPU selection here # @@ -28,10 +30,10 @@ define_bool CONFIG_X86_BSWAP y define_bool CONFIG_X86_POPAD_OK y fi -if [ "$CONFIG_M686" = "y" -o "$CONFIG_M586TSC" = "y" ]; then +if [ "$CONFIG_M686" = "y" -o "$CONFIG_M586TSC" = "y" -o "$CONFIG_M686_L1_64" = "y" -o "$CONFIG_M686_L1_128" = "y" ]; then define_bool CONFIG_X86_TSC y fi -if [ "$CONFIG_M686" = "y" ]; then +if [ "$CONFIG_M686" = "y" -o "$CONFIG_M686_L1_64" = "y" -o "$CONFIG_M686_L1_128" = "y" ]; then define_bool CONFIG_X86_GOOD_APIC y fi diff -urN linux.orig/arch/i386/kernel/Makefile linux/arch/i386/kernel/Makefile --- linux.orig/arch/i386/kernel/Makefile Sun Mar 25 18:37:29 2001 +++ linux/arch/i386/kernel/Makefile Wed Aug 7 
17:44:42 2002 @@ -15,7 +15,7 @@ O_TARGET := kernel.o O_OBJS := process.o signal.o entry.o traps.o irq.o vm86.o \ ptrace.o ioport.o ldt.o setup.o time.o sys_i386.o \ - bluesmoke.o + bluesmoke.o i387.o OX_OBJS := i386_ksyms.o dmi_scan.o MX_OBJS := diff -urN linux.orig/arch/i386/kernel/entry.S linux/arch/i386/kernel/entry.S --- linux.orig/arch/i386/kernel/entry.S Tue May 21 01:32:34 2002 +++ linux/arch/i386/kernel/entry.S Wed Aug 7 17:44:43 2002 @@ -288,6 +288,11 @@ pushl $ SYMBOL_NAME(do_coprocessor_error) jmp error_code +ENTRY(simd_coprocessor_error) + pushl $0 + pushl $ SYMBOL_NAME(do_simd_coprocessor_error) + jmp error_code + ENTRY(device_not_available) pushl $-1 # mark this as an int SAVE_ALL diff -urN linux.orig/arch/i386/kernel/head.S linux/arch/i386/kernel/head.S --- linux.orig/arch/i386/kernel/head.S Tue May 21 01:32:34 2002 +++ linux/arch/i386/kernel/head.S Wed Aug 7 17:44:43 2002 @@ -64,10 +64,13 @@ * NOTE! We have to correct for the fact that we're * not yet offset PAGE_OFFSET.. */ -#define cr4_bits mmu_cr4_features-__PAGE_OFFSET +#define cr4_bits x86_cr4-__PAGE_OFFSET movl %cr4,%eax # Turn on 4Mb pages orl cr4_bits,%eax movl %eax,%cr4 + movl %cr3,%eax # Intel specification clarification says + movl %eax,%cr3 # to do this. Maybe it makes a difference. + # Who knows ? #endif /* * Setup paging (the tables are already set up, just switch them on) @@ -222,21 +225,6 @@ orl $2,%eax # set MP 2: movl %eax,%cr0 call check_x87 -#ifdef __SMP__ - movb ready,%al # First CPU if 0 - orb %al,%al - jz 4f # First CPU skip this stuff - movl %cr4,%eax # Turn on 4Mb pages - orl $16,%eax - movl %eax,%cr4 - movl %cr3,%eax # Intel specification clarification says - movl %eax,%cr3 # to do this. Maybe it makes a difference. - # Who knows ? -#endif -4: -#ifdef __SMP__ - incb ready -#endif lgdt gdt_descr lidt idt_descr ljmp $(__KERNEL_CS),$1f @@ -259,10 +247,6 @@ jmp L6 # main should never return here, but # just in case, we know what happens. -#ifdef __SMP__ -ready: .byte 0 -#endif - /* * We depend on ET to be correct. This checks for 287/387. 
*/ diff -urN linux.orig/arch/i386/kernel/i386_ksyms.c linux/arch/i386/kernel/i386_ksyms.c --- linux.orig/arch/i386/kernel/i386_ksyms.c Sun Mar 25 18:31:45 2001 +++ linux/arch/i386/kernel/i386_ksyms.c Wed Aug 7 17:44:43 2002 @@ -11,6 +11,7 @@ #include #include +#include #include #include #include @@ -19,7 +20,6 @@ #include extern void dump_thread(struct pt_regs *, struct user *); -extern int dump_fpu(elf_fpregset_t *); extern spinlock_t rtc_lock; #if defined(CONFIG_BLK_DEV_IDE) || defined(CONFIG_BLK_DEV_HD) || defined(CONFIG_BLK_DEV_IDE_MODULE) || defined(CONFIG_BLK_DEV_HD_MODULE) @@ -34,6 +34,7 @@ EXPORT_SYMBOL(__verify_write); EXPORT_SYMBOL(dump_thread); EXPORT_SYMBOL(dump_fpu); +EXPORT_SYMBOL(dump_extended_fpu); EXPORT_SYMBOL(__ioremap); EXPORT_SYMBOL(iounmap); EXPORT_SYMBOL(local_bh_count); diff -urN linux.orig/arch/i386/kernel/i387.c linux/arch/i386/kernel/i387.c --- linux.orig/arch/i386/kernel/i387.c Thu Jan 1 01:00:00 1970 +++ linux/arch/i386/kernel/i387.c Wed Aug 7 17:44:43 2002 @@ -0,0 +1,512 @@ +/* + * linux/arch/i386/kernel/i387.c + * + * Copyright (C) 1994 Linus Torvalds + * + * Pentium III FXSR, SSE support + * General FPU state handling cleanups + * Gareth Hughes , May 2000 + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define HAVE_FXSR cpu_has_fxsr +#define HAVE_XMM cpu_has_xmm + +#ifdef CONFIG_MATH_EMULATION +#define HAVE_HWFP (boot_cpu_data.hard_math) +#else +#define HAVE_HWFP 1 +#endif + +/* + * The _current_ task is using the FPU for the first time + * so initialize it and set the mxcsr to its default + * value at reset if we support XMM instructions and then + * remember the current task has used the FPU. + */ +void init_fpu(void) +{ + __asm__("fninit"); + if ( HAVE_XMM ) + load_mxcsr(0x1f80); + + current->used_math = 1; +} + +/* + * FPU lazy state save handling. + */ + +void save_init_fpu( struct task_struct *tsk ) +{ + if ( HAVE_FXSR ) { + asm volatile( "fxsave %0 ; fnclex" + : "=m" (tsk->tss.i387.fxsave) ); + } else { + asm volatile( "fnsave %0 ; fwait" + : "=m" (tsk->tss.i387.fsave) ); + } + tsk->flags &= ~PF_USEDFPU; + stts(); +} + +void restore_fpu( struct task_struct *tsk ) +{ + if ( HAVE_FXSR ) { + asm volatile( "fxrstor %0" + : : "m" (tsk->tss.i387.fxsave) ); + } else { + asm volatile( "frstor %0" + : : "m" (tsk->tss.i387.fsave) ); + } +} + +/* + * FPU tag word conversions. + */ + +static inline unsigned short twd_i387_to_fxsr( unsigned short twd ) +{ + unsigned int tmp; /* to avoid 16 bit prefixes in the code */ + + /* Transform each pair of bits into 01 (valid) or 00 (empty) */ + tmp = ~twd; + tmp = (tmp | (tmp>>1)) & 0x5555; /* 0V0V0V0V0V0V0V0V */ + /* and move the valid bits to the lower byte. 
*/ + tmp = (tmp | (tmp >> 1)) & 0x3333; /* 00VV00VV00VV00VV */ + tmp = (tmp | (tmp >> 2)) & 0x0f0f; /* 0000VVVV0000VVVV */ + tmp = (tmp | (tmp >> 4)) & 0x00ff; /* 00000000VVVVVVVV */ + return tmp; +} + +static inline unsigned long twd_fxsr_to_i387( struct i387_fxsave_struct *fxsave ) +{ + struct _fpxreg *st = NULL; + unsigned long twd = (unsigned long) fxsave->twd; + unsigned long tag; + unsigned long ret = 0xffff0000; + int i; + +#define FPREG_ADDR(f, n) ((char *)&(f)->st_space + (n) * 16); + + for ( i = 0 ; i < 8 ; i++ ) { + if ( twd & 0x1 ) { + st = (struct _fpxreg *) FPREG_ADDR( fxsave, i ); + + switch ( st->exponent & 0x7fff ) { + case 0x7fff: + tag = 2; /* Special */ + break; + case 0x0000: + if ( !st->significand[0] && + !st->significand[1] && + !st->significand[2] && + !st->significand[3] ) { + tag = 1; /* Zero */ + } else { + tag = 2; /* Special */ + } + break; + default: + if ( st->significand[3] & 0x8000 ) { + tag = 0; /* Valid */ + } else { + tag = 2; /* Special */ + } + break; + } + } else { + tag = 3; /* Empty */ + } + ret |= (tag << (2 * i)); + twd = twd >> 1; + } + return ret; +} + +/* + * FPU state interaction. + */ + +unsigned short get_fpu_cwd( struct task_struct *tsk ) +{ + if ( HAVE_FXSR ) { + return tsk->tss.i387.fxsave.cwd; + } else { + return (unsigned short)tsk->tss.i387.fsave.cwd; + } +} + +unsigned short get_fpu_swd( struct task_struct *tsk ) +{ + if ( HAVE_FXSR ) { + return tsk->tss.i387.fxsave.swd; + } else { + return (unsigned short)tsk->tss.i387.fsave.swd; + } +} + +unsigned short get_fpu_twd( struct task_struct *tsk ) +{ + if ( HAVE_FXSR ) { + return tsk->tss.i387.fxsave.twd; + } else { + return (unsigned short)tsk->tss.i387.fsave.twd; + } +} + +unsigned short get_fpu_mxcsr( struct task_struct *tsk ) +{ + if ( HAVE_XMM ) { + return tsk->tss.i387.fxsave.mxcsr; + } else { + return 0x1f80; + } +} + +void set_fpu_cwd( struct task_struct *tsk, unsigned short cwd ) +{ + if ( HAVE_FXSR ) { + tsk->tss.i387.fxsave.cwd = cwd; + } else { + tsk->tss.i387.fsave.cwd = ((long)cwd | 0xffff0000); + } +} + +void set_fpu_swd( struct task_struct *tsk, unsigned short swd ) +{ + if ( HAVE_FXSR ) { + tsk->tss.i387.fxsave.swd = swd; + } else { + tsk->tss.i387.fsave.swd = ((long)swd | 0xffff0000); + } +} + +void set_fpu_twd( struct task_struct *tsk, unsigned short twd ) +{ + if ( HAVE_FXSR ) { + tsk->tss.i387.fxsave.twd = twd_i387_to_fxsr(twd); + } else { + tsk->tss.i387.fsave.twd = ((long)twd | 0xffff0000); + } +} + +void set_fpu_mxcsr( struct task_struct *tsk, unsigned short mxcsr ) +{ + if ( HAVE_XMM ) { + tsk->tss.i387.fxsave.mxcsr = (mxcsr & 0xffbf); + } +} + +/* + * FXSR floating point environment conversions. 
+ */ + +static inline int convert_fxsr_to_user( struct _fpstate *buf, + struct i387_fxsave_struct *fxsave ) +{ + unsigned long env[7]; + struct _fpreg *to; + struct _fpxreg *from; + int i; + + env[0] = (unsigned long)fxsave->cwd | 0xffff0000; + env[1] = (unsigned long)fxsave->swd | 0xffff0000; + env[2] = twd_fxsr_to_i387(fxsave); + env[3] = fxsave->fip; + env[4] = fxsave->fcs | ((unsigned long)fxsave->fop << 16); + env[5] = fxsave->foo; + env[6] = fxsave->fos; + + if ( __copy_to_user( buf, env, 7 * sizeof(unsigned long) ) ) + return 1; + + to = &buf->_st[0]; + from = (struct _fpxreg *) &fxsave->st_space[0]; + for ( i = 0 ; i < 8 ; i++, to++, from++ ) { + if ( __copy_to_user( to, from, sizeof(*to) ) ) + return 1; + } + return 0; +} + +static inline int convert_fxsr_from_user( struct i387_fxsave_struct *fxsave, + struct _fpstate *buf ) +{ + unsigned long env[7]; + struct _fpxreg *to; + struct _fpreg *from; + int i; + + if ( __copy_from_user( env, buf, 7 * sizeof(long) ) ) + return 1; + + fxsave->cwd = (unsigned short)(env[0] & 0xffff); + fxsave->swd = (unsigned short)(env[1] & 0xffff); + fxsave->twd = twd_i387_to_fxsr((unsigned short)(env[2] & 0xffff)); + fxsave->fip = env[3]; + fxsave->fop = (unsigned short)((env[4] & 0xffff0000) >> 16); + fxsave->fcs = (env[4] & 0xffff); + fxsave->foo = env[5]; + fxsave->fos = env[6]; + + to = (struct _fpxreg *) &fxsave->st_space[0]; + from = &buf->_st[0]; + for ( i = 0 ; i < 8 ; i++, to++, from++ ) { + if ( __copy_from_user( to, from, sizeof(*from) ) ) + return 1; + } + return 0; +} + +/* + * Signal frame handlers. + */ + +static inline int save_i387_fsave( struct _fpstate *buf ) +{ + struct task_struct *tsk = current; + + unlazy_fpu( tsk ); + tsk->tss.i387.fsave.status = tsk->tss.i387.fsave.swd; + if ( __copy_to_user( buf, &tsk->tss.i387.fsave, + sizeof(struct i387_fsave_struct) ) ) + return -1; + return 1; +} + +static inline int save_i387_fxsave( struct _fpstate *buf ) +{ + struct task_struct *tsk = current; + int err = 0; + + unlazy_fpu( tsk ); + + if ( convert_fxsr_to_user( buf, &tsk->tss.i387.fxsave ) ) + return -1; + + err |= __put_user( tsk->tss.i387.fxsave.swd, &buf->status ); + err |= __put_user( X86_FXSR_MAGIC, &buf->magic ); + if ( err ) + return -1; + + if ( __copy_to_user( &buf->_fxsr_env[0], &tsk->tss.i387.fxsave, + sizeof(struct i387_fxsave_struct) ) ) + return -1; + return 1; +} + +int save_i387( struct _fpstate *buf ) +{ + if ( !current->used_math ) + return 0; + + /* This will cause a "finit" to be triggered by the next + * attempted FPU operation by the 'current' process. 
+ */ + current->used_math = 0; + + if ( HAVE_HWFP ) { + if ( HAVE_FXSR ) { + return save_i387_fxsave( buf ); + } else { + return save_i387_fsave( buf ); + } + } else { + return save_i387_soft( &current->tss.i387.soft, buf ); + } +} + +static inline int restore_i387_fsave( struct _fpstate *buf ) +{ + struct task_struct *tsk = current; + clear_fpu( tsk ); + return __copy_from_user( &tsk->tss.i387.fsave, buf, + sizeof(struct i387_fsave_struct) ); +} + +static inline int restore_i387_fxsave( struct _fpstate *buf ) +{ + struct task_struct *tsk = current; + clear_fpu( tsk ); + if ( __copy_from_user( &tsk->tss.i387.fxsave, &buf->_fxsr_env[0], + sizeof(struct i387_fxsave_struct) ) ) + return 1; + /* bit 6 and 31-16 must be zero for security reasons */ + tsk->tss.i387.fxsave.mxcsr &= 0xffbf; + return convert_fxsr_from_user( &tsk->tss.i387.fxsave, buf ); +} + +int restore_i387( struct _fpstate *buf ) +{ + int err; + + if ( HAVE_HWFP ) { + if ( HAVE_FXSR ) { + err = restore_i387_fxsave( buf ); + } else { + err = restore_i387_fsave( buf ); + } + } else { + err = restore_i387_soft( &current->tss.i387.soft, buf ); + } + current->used_math = 1; + return err; +} + +/* + * ptrace request handlers. + */ + +static inline int get_fpregs_fsave( struct user_i387_struct *buf, + struct task_struct *tsk ) +{ + return __copy_to_user( buf, &tsk->tss.i387.fsave, + sizeof(struct user_i387_struct) ); +} + +static inline int get_fpregs_fxsave( struct user_i387_struct *buf, + struct task_struct *tsk ) +{ + return convert_fxsr_to_user( (struct _fpstate *)buf, + &tsk->tss.i387.fxsave ); +} + +int get_fpregs( struct user_i387_struct *buf, struct task_struct *tsk ) +{ + if ( HAVE_HWFP ) { + if ( HAVE_FXSR ) { + return get_fpregs_fxsave( buf, tsk ); + } else { + return get_fpregs_fsave( buf, tsk ); + } + } else { + return save_i387_soft( &tsk->tss.i387.soft, + (struct _fpstate *)buf ); + } +} + +static inline int set_fpregs_fsave( struct task_struct *tsk, + struct user_i387_struct *buf ) +{ + return __copy_from_user( &tsk->tss.i387.fsave, buf, + sizeof(struct user_i387_struct) ); +} + +static inline int set_fpregs_fxsave( struct task_struct *tsk, + struct user_i387_struct *buf ) +{ + return convert_fxsr_from_user( &tsk->tss.i387.fxsave, + (struct _fpstate *)buf ); +} + +int set_fpregs( struct task_struct *tsk, struct user_i387_struct *buf ) +{ + if ( HAVE_HWFP ) { + if ( HAVE_FXSR ) { + return set_fpregs_fxsave( tsk, buf ); + } else { + return set_fpregs_fsave( tsk, buf ); + } + } else { + return restore_i387_soft( &tsk->tss.i387.soft, + (struct _fpstate *)buf ); + } +} + +int get_fpxregs( struct user_fxsr_struct *buf, struct task_struct *tsk ) +{ + if ( HAVE_FXSR ) { + if (__copy_to_user( (void *)buf, &tsk->tss.i387.fxsave, + sizeof(struct user_fxsr_struct) )) + return -EFAULT; + return 0; + } else { + return -EIO; + } +} + +int set_fpxregs( struct task_struct *tsk, struct user_fxsr_struct *buf ) +{ + if ( HAVE_FXSR ) { + int error; + + error = __copy_from_user(&tsk->tss.i387.fxsave, (void *)buf, + sizeof(struct user_fxsr_struct)); + /* bit 6 and 31-16 must be zero for security reasons */ + tsk->tss.i387.fxsave.mxcsr &= 0xffbf; + + return error ? -EFAULT : 0; + } else { + return -EIO; + } +} + +/* + * FPU state for core dumps. 
+ */ + +static inline void copy_fpu_fsave( struct task_struct *tsk, + struct user_i387_struct *fpu ) +{ + memcpy( fpu, &tsk->tss.i387.fsave, + sizeof(struct user_i387_struct) ); +} + +static inline void copy_fpu_fxsave( struct task_struct *tsk, + struct user_i387_struct *fpu ) +{ + unsigned short *to; + unsigned short *from; + int i; + + memcpy( fpu, &tsk->tss.i387.fxsave, 7 * sizeof(long) ); + + to = (unsigned short *)&fpu->st_space[0]; + from = (unsigned short *)&tsk->tss.i387.fxsave.st_space[0]; + for ( i = 0 ; i < 8 ; i++, to += 5, from += 8 ) { + memcpy( to, from, 5 * sizeof(unsigned short) ); + } +} + +int dump_fpu( struct pt_regs *regs, struct user_i387_struct *fpu ) +{ + int fpvalid; + struct task_struct *tsk = current; + + fpvalid = tsk->used_math; + if ( fpvalid ) { + unlazy_fpu( tsk ); + if ( HAVE_FXSR ) { + copy_fpu_fxsave( tsk, fpu ); + } else { + copy_fpu_fsave( tsk, fpu ); + } + } + + return fpvalid; +} + +int dump_extended_fpu( struct pt_regs *regs, struct user_fxsr_struct *fpu ) +{ + int fpvalid; + struct task_struct *tsk = current; + + fpvalid = tsk->used_math && HAVE_FXSR; + if ( fpvalid ) { + unlazy_fpu( tsk ); + memcpy( fpu, &tsk->tss.i387.fxsave, + sizeof(struct user_fxsr_struct) ); + } + + return fpvalid; +} diff -urN linux.orig/arch/i386/kernel/irq.c linux/arch/i386/kernel/irq.c --- linux.orig/arch/i386/kernel/irq.c Sun Mar 25 18:31:45 2001 +++ linux/arch/i386/kernel/irq.c Wed Aug 7 17:44:43 2002 @@ -381,10 +381,11 @@ static void math_error_irq(int cpl, void *dev_id, struct pt_regs *regs) { + extern void math_error(void *); outb(0,0xF0); if (ignore_irq13 || !boot_cpu_data.hard_math) return; - math_error(); + math_error((void *)regs->eip); } static struct irqaction irq13 = { math_error_irq, 0, 0, "fpu", NULL, NULL }; diff -urN linux.orig/arch/i386/kernel/irq.h linux/arch/i386/kernel/irq.h --- linux.orig/arch/i386/kernel/irq.h Fri Nov 2 17:39:05 2001 +++ linux/arch/i386/kernel/irq.h Wed Aug 7 17:44:10 2002 @@ -40,7 +40,9 @@ struct hw_interrupt_type *handler; /* handle/enable/disable functions */ struct irqaction *action; /* IRQ action list */ unsigned int depth; /* Disable depth for nested irq disables */ - unsigned int unused[4]; +#ifdef CONFIG_SMP + unsigned char unused[L1_CACHE_BYTES-16]; +#endif } irq_desc_t; /* diff -urN linux.orig/arch/i386/kernel/process.c linux/arch/i386/kernel/process.c --- linux.orig/arch/i386/kernel/process.c Fri Nov 2 17:39:05 2001 +++ linux/arch/i386/kernel/process.c Wed Aug 7 17:56:44 2002 @@ -2,6 +2,9 @@ * linux/arch/i386/kernel/process.c * * Copyright (C) 1995 Linus Torvalds + * + * Pentium III FXSR, SSE support + * Gareth Hughes , May 2000 */ /* @@ -40,6 +43,7 @@ #include #include #include +#include #ifdef CONFIG_MATH_EMULATION #include #endif @@ -384,23 +388,15 @@ void show_regs(struct pt_regs * regs) { + extern void show_registers(struct pt_regs *); long cr0 = 0L, cr2 = 0L, cr3 = 0L; printk("\n"); - printk("EIP: %04x:[<%08lx>]",0xffff & regs->xcs,regs->eip); - if (regs->xcs & 3) - printk(" ESP: %04x:%08lx",0xffff & regs->xss,regs->esp); - printk(" EFLAGS: %08lx\n",regs->eflags); - printk("EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n", - regs->eax,regs->ebx,regs->ecx,regs->edx); - printk("ESI: %08lx EDI: %08lx EBP: %08lx", - regs->esi, regs->edi, regs->ebp); - printk(" DS: %04x ES: %04x\n", - 0xffff & regs->xds,0xffff & regs->xes); __asm__("movl %%cr0, %0": "=r" (cr0)); __asm__("movl %%cr2, %0": "=r" (cr2)); __asm__("movl %%cr3, %0": "=r" (cr3)); printk("CR0: %08lx CR2: %08lx CR3: %08lx\n", cr0, cr2, cr3); + show_registers(regs); } 
/* @@ -615,23 +611,6 @@ } /* - * fill in the FPU structure for a core dump. - */ -int dump_fpu (struct pt_regs * regs, struct user_i387_struct* fpu) -{ - int fpvalid; - struct task_struct *tsk = current; - - fpvalid = tsk->used_math; - if (fpvalid) { - unlazy_fpu(tsk); - memcpy(fpu,&tsk->tss.i387.hard,sizeof(*fpu)); - } - - return fpvalid; -} - -/* * fill in the user structure for a core dump.. */ void dump_thread(struct pt_regs * regs, struct user * dump) diff -urN linux.orig/arch/i386/kernel/ptrace.c linux/arch/i386/kernel/ptrace.c --- linux.orig/arch/i386/kernel/ptrace.c Fri Nov 2 17:39:05 2001 +++ linux/arch/i386/kernel/ptrace.c Wed Aug 7 17:44:43 2002 @@ -1,8 +1,11 @@ /* ptrace.c */ /* By Ross Biro 1/23/92 */ /* edited by Linus Torvalds */ +/* + * Pentium III FXSR, SSE support + * Gareth Hughes , May 2000 + */ -#include /* for CONFIG_MATH_EMULATION */ #include #include #include @@ -18,6 +21,7 @@ #include #include #include +#include /* * does not yet catch signals sent when the child dies. @@ -658,21 +662,11 @@ ret = 0; if ( !child->used_math ) { /* Simulate an empty FPU. */ - child->tss.i387.hard.cwd = 0xffff037f; - child->tss.i387.hard.swd = 0xffff0000; - child->tss.i387.hard.twd = 0xffffffff; - } -#ifdef CONFIG_MATH_EMULATION - if ( boot_cpu_data.hard_math ) { -#endif - __copy_to_user((void *)data, &child->tss.i387.hard, - sizeof(struct user_i387_struct)); -#ifdef CONFIG_MATH_EMULATION - } else { - save_i387_soft(&child->tss.i387.soft, - (struct _fpstate *)data); + set_fpu_cwd(child, 0x037f); + set_fpu_swd(child, 0x0000); + set_fpu_twd(child, 0xffff); } -#endif + get_fpregs((struct user_i387_struct *)data, child); goto out; }; @@ -684,21 +678,39 @@ goto out; } child->used_math = 1; -#ifdef CONFIG_MATH_EMULATION - if ( boot_cpu_data.hard_math ) { -#endif - __copy_from_user(&child->tss.i387.hard, (void *)data, - sizeof(struct user_i387_struct)); -#ifdef CONFIG_MATH_EMULATION - } else { - restore_i387_soft(&child->tss.i387.soft, - (struct _fpstate *)data); - } -#endif + set_fpregs(child, (struct user_i387_struct *)data); ret = 0; goto out; }; + case PTRACE_GETFPXREGS: { /* Get the child extended FPU state. */ + if (!access_ok(VERIFY_WRITE, (unsigned *)data, + sizeof(struct user_fxsr_struct))) { + ret = -EIO; + goto out; + } + if ( !child->used_math ) { + /* Simulate an empty FPU. */ + set_fpu_cwd(child, 0x037f); + set_fpu_swd(child, 0x0000); + set_fpu_twd(child, 0xffff); + set_fpu_mxcsr(child, 0x1f80); + } + ret = get_fpxregs((struct user_fxsr_struct *)data, child); + goto out; + }; + + case PTRACE_SETFPXREGS: { /* Set the child extended FPU state. */ + if (!access_ok(VERIFY_READ, (unsigned *)data, + sizeof(struct user_fxsr_struct))) { + ret = -EIO; + goto out; + } + child->used_math = 1; + ret = set_fpxregs(child, (struct user_fxsr_struct *)data); + goto out; + }; + default: ret = -EIO; goto out; diff -urN linux.orig/arch/i386/kernel/setup.c linux/arch/i386/kernel/setup.c --- linux.orig/arch/i386/kernel/setup.c Tue May 21 01:32:34 2002 +++ linux/arch/i386/kernel/setup.c Wed Aug 7 17:44:43 2002 @@ -86,6 +86,8 @@ char ignore_irq13 = 0; /* set if exception 16 works */ struct cpuinfo_x86 boot_cpu_data = { 0, 0, 0, 0, -1, 1, 0, 0, -1 }; +unsigned long x86_cr4; + /* * Bus types .. 
*/ @@ -124,6 +126,7 @@ extern unsigned long cpu_khz; static int disable_x86_serial_nr __initdata = 1; +static int disable_x86_fxsr __initdata = 0; /* * This is set up by the setup-routine at boot-time @@ -1308,7 +1311,8 @@ static void __init squash_the_stupid_serial_number(struct cpuinfo_x86 *c) { - if (c->x86_capability&(X86_FEATURE_PN) && disable_x86_serial_nr) { + if (c->x86_capability&(X86_FEATURE_PN) && (c->x86_vendor == X86_VENDOR_INTEL || c->x86_vendor == X86_VENDOR_TRANSMETA) && + disable_x86_serial_nr) { /* Disable processor serial number */ unsigned long lo,hi; rdmsr(0x119,lo,hi); @@ -1326,6 +1330,12 @@ } __setup("serialnumber", x86_serial_nr_setup); +int __init x86_fxsr_setup(char * s) +{ + disable_x86_fxsr = 1; + return 1; +} +__setup("nofxsr", x86_fxsr_setup); __initfunc(void identify_cpu(struct cpuinfo_x86 *c)) { @@ -1377,6 +1387,13 @@ break; } + /* FXSR disabled? */ + if (disable_x86_fxsr) { + printk(KERN_INFO "Disabling fast FPU save and restore.\n"); + clear_bit(X86_FEATURE_FXSR, &c->x86_capability); + clear_bit(X86_FEATURE_XMM, &c->x86_capability); + } + squash_the_stupid_serial_number(c); mcheck_init(c); diff -urN linux.orig/arch/i386/kernel/signal.c linux/arch/i386/kernel/signal.c --- linux.orig/arch/i386/kernel/signal.c Fri Nov 2 17:39:05 2001 +++ linux/arch/i386/kernel/signal.c Wed Aug 7 17:44:43 2002 @@ -4,6 +4,7 @@ * Copyright (C) 1991, 1992 Linus Torvalds * * 1997-11-28 Modified for POSIX.1b signals by Richard Henderson + * 2000-06-20 Pentium III FXSR, SSE support by Gareth Hughes */ #include @@ -21,6 +22,7 @@ #include #include #include +#include #define DEBUG_SIG 0 @@ -150,29 +152,6 @@ char retcode[8]; }; - -static inline int restore_i387_hard(struct _fpstate *buf) -{ - struct task_struct *tsk = current; - clear_fpu(tsk); - return __copy_from_user(&tsk->tss.i387.hard, buf, sizeof(*buf)); -} - -static inline int restore_i387(struct _fpstate *buf) -{ - int err; -#ifndef CONFIG_MATH_EMULATION - err = restore_i387_hard(buf); -#else - if (boot_cpu_data.hard_math) - err = restore_i387_hard(buf); - else - err = restore_i387_soft(¤t->tss.i387.soft, buf); -#endif - current->used_math = 1; - return err; -} - static int restore_sigcontext(struct pt_regs *regs, struct sigcontext *sc, int *peax) { @@ -299,39 +278,6 @@ return 0; } -/* - * Set up a signal frame. - */ - -static inline int save_i387_hard(struct _fpstate * buf) -{ - struct task_struct *tsk = current; - - unlazy_fpu(tsk); - tsk->tss.i387.hard.status = tsk->tss.i387.hard.swd; - if (__copy_to_user(buf, &tsk->tss.i387.hard, sizeof(*buf))) - return -1; - return 1; -} - -static int save_i387(struct _fpstate *buf) -{ - if (!current->used_math) - return 0; - - /* This will cause a "finit" to be triggered by the next - attempted FPU operation by the 'current' process. - */ - current->used_math = 0; - -#ifndef CONFIG_MATH_EMULATION - return save_i387_hard(buf); -#else - return boot_cpu_data.hard_math ? 
save_i387_hard(buf) - : save_i387_soft(¤t->tss.i387.soft, buf); -#endif -} - static int setup_sigcontext(struct sigcontext *sc, struct _fpstate *fpstate, struct pt_regs *regs, unsigned long mask) diff -urN linux.orig/arch/i386/kernel/smp.c linux/arch/i386/kernel/smp.c --- linux.orig/arch/i386/kernel/smp.c Sun Mar 25 18:37:29 2001 +++ linux/arch/i386/kernel/smp.c Wed Aug 7 17:58:31 2002 @@ -112,7 +112,7 @@ static volatile unsigned long cpu_callout_map[NR_CPUS] = {0,}; /* We always use 0 the rest is ready for parallel delivery */ volatile unsigned long smp_invalidate_needed; /* Used for the invalidate map that's also checked in the spinlock */ volatile unsigned long kstack_ptr; /* Stack vector for booting CPUs */ -struct cpuinfo_x86 cpu_data[NR_CPUS]; /* Per CPU bogomips and other parameters */ +struct cpuinfo_x86 cpu_data[NR_CPUS] __cacheline_aligned = { { 0, }, }; /* Per CPU bogomips and other parameters */ static unsigned int num_processors = 1; /* Internal processor count */ unsigned char boot_cpu_id = 0; /* Processor that is doing the boot up */ static int smp_activated = 0; /* Tripped once we need to start cross invalidating */ @@ -737,12 +737,6 @@ { unsigned long value; - value = apic_read(APIC_SPIV); - value |= (1<<8); /* Enable APIC (bit==1) */ - value &= ~(1<<9); /* Enable focus processor (bit==0) */ - value |= 0xff; /* Set spurious IRQ vector to 0xff */ - apic_write(APIC_SPIV,value); - /* * Set Task Priority to 'accept all' */ @@ -762,6 +756,12 @@ value |= SET_APIC_DFR(0xf); apic_write(APIC_DFR, value); + value = apic_read(APIC_SPIV); + value |= (1<<8); /* Enable APIC (bit==1) */ + value &= ~(1<<9); /* Enable focus processor (bit==0) */ + value |= 0xff; /* Set spurious IRQ vector to 0xff */ + apic_write(APIC_SPIV,value); + udelay(100); /* B safe */ } @@ -810,7 +810,6 @@ return memory_start; } -#ifdef CONFIG_X86_TSC /* * TSC synchronization. * @@ -1010,8 +1009,6 @@ } #undef NR_LOOPS -#endif - extern void calibrate_delay(void); void __init smp_callin(void) @@ -1098,12 +1095,11 @@ */ set_bit(cpuid, (unsigned long *)&cpu_callin_map[0]); -#ifdef CONFIG_X86_TSC /* * Synchronize the TSC with the BP */ - synchronize_tsc_ap (); -#endif + if (boot_cpu_data.x86_capability & X86_FEATURE_TSC) + synchronize_tsc_ap (); } int cpucount = 0; @@ -1640,13 +1636,11 @@ smp_done: -#ifdef CONFIG_X86_TSC /* * Synchronize the TSC with the AP */ - if (cpucount) + if (boot_cpu_data.x86_capability & X86_FEATURE_TSC && cpucount) synchronize_tsc_bp(); -#endif } /* diff -urN linux.orig/arch/i386/kernel/time.c linux/arch/i386/kernel/time.c --- linux.orig/arch/i386/kernel/time.c Sun Mar 25 18:37:30 2001 +++ linux/arch/i386/kernel/time.c Wed Aug 7 17:53:48 2002 @@ -247,6 +247,20 @@ #endif +/* FIXME: should be inline but gcc is buggy and breaks */ +static void +timeval_normalize(struct timeval * tv) +{ + time_t __sec; + + __sec = tv->tv_usec / 1000000; + if (__sec) + { + tv->tv_usec %= 1000000; + tv->tv_sec += __sec; + } +} + /* * This version of gettimeofday has microsecond resolution * and better than microsecond precision on fast x86 machines with TSC. 
@@ -267,13 +281,10 @@ usec += xtime.tv_usec; read_unlock_irqrestore(&xtime_lock, flags); - while (usec >= 1000000) { - usec -= 1000000; - sec++; - } - tv->tv_sec = sec; tv->tv_usec = usec; + + timeval_normalize(tv); } void do_settimeofday(struct timeval *tv) diff -urN linux.orig/arch/i386/kernel/traps.c linux/arch/i386/kernel/traps.c --- linux.orig/arch/i386/kernel/traps.c Tue May 21 01:32:34 2002 +++ linux/arch/i386/kernel/traps.c Wed Aug 7 17:56:44 2002 @@ -2,6 +2,9 @@ * linux/arch/i386/traps.c * * Copyright (C) 1991, 1992 Linus Torvalds + * + * Pentium III FXSR, SSE support + * Gareth Hughes , May 2000 */ /* @@ -33,6 +36,7 @@ #include #include #include +#include #include @@ -106,6 +110,7 @@ asmlinkage void general_protection(void); asmlinkage void page_fault(void); asmlinkage void coprocessor_error(void); +asmlinkage void simd_coprocessor_error(void); asmlinkage void alignment_check(void); asmlinkage void spurious_interrupt_bug(void); asmlinkage void machine_check(void); @@ -120,7 +125,7 @@ #define VMALLOC_OFFSET (8*1024*1024) #define MODULE_RANGE (8*1024*1024) -static void show_registers(struct pt_regs *regs) +void show_registers(struct pt_regs *regs) { int i; int in_kernel = 1; @@ -428,25 +433,138 @@ * the correct behaviour even in the presence of the asynchronous * IRQ13 behaviour */ -void math_error(void) +void math_error(void *eip) { struct task_struct * task; + siginfo_t info; + unsigned short cwd, swd; /* * Save the info for the exception handler * (this will also clear the error) */ task = current; - save_fpu(task); + save_init_fpu(task); task->tss.trap_no = 16; task->tss.error_code = 0; - force_sig(SIGFPE, task); + info.si_signo = SIGFPE; + info.si_errno = 0; + info.si_code = __SI_FAULT; + info.si_addr = eip; + /* + * (~cwd & swd) will mask out exceptions that are not set to unmasked + * status. 0x3f is the exception bits in these regs, 0x200 is the + * C1 reg you need in case of a stack fault, 0x040 is the stack + * fault bit. We should only be taking one exception at a time, + * so if this combination doesn't produce any single exception, + * then we have a bad program that isn't syncronizing its FPU usage + * and it will suffer the consequences since we won't be able to + * fully reproduce the context of the exception + */ + cwd = get_fpu_cwd(task); + swd = get_fpu_swd(task); + switch (((~cwd) & swd & 0x3f) | (swd & 0x240)) { + case 0x000: + default: + break; + case 0x001: /* Invalid Op */ + case 0x040: /* Stack Fault */ + case 0x240: /* Stack Fault | Direction */ + info.si_code = FPE_FLTINV; + break; + case 0x002: /* Denormalize */ + case 0x010: /* Underflow */ + info.si_code = FPE_FLTUND; + break; + case 0x004: /* Zero Divide */ + info.si_code = FPE_FLTDIV; + break; + case 0x008: /* Overflow */ + info.si_code = FPE_FLTOVF; + break; + case 0x020: /* Precision */ + info.si_code = FPE_FLTRES; + break; + } + force_sig_info(SIGFPE, &info, task); } asmlinkage void do_coprocessor_error(struct pt_regs * regs, long error_code) { ignore_irq13 = 1; - math_error(); + math_error((void *)regs->eip); +} + +void simd_math_error(void *eip) +{ + struct task_struct * task; + siginfo_t info; + unsigned short mxcsr; + + /* + * Save the info for the exception handler and clear the error. 
+ */ + task = current; + save_init_fpu(task); + task->tss.trap_no = 19; + task->tss.error_code = 0; + info.si_signo = SIGFPE; + info.si_errno = 0; + info.si_code = __SI_FAULT; + info.si_addr = eip; + /* + * The SIMD FPU exceptions are handled a little differently, as there + * is only a single status/control register. Thus, to determine which + * unmasked exception was caught we must mask the exception mask bits + * at 0x1f80, and then use these to mask the exception bits at 0x3f. + */ + mxcsr = get_fpu_mxcsr(task); + switch (~((mxcsr & 0x1f80) >> 7) & (mxcsr & 0x3f)) { + case 0x000: + default: + break; + case 0x001: /* Invalid Op */ + info.si_code = FPE_FLTINV; + break; + case 0x002: /* Denormalize */ + case 0x010: /* Underflow */ + info.si_code = FPE_FLTUND; + break; + case 0x004: /* Zero Divide */ + info.si_code = FPE_FLTDIV; + break; + case 0x008: /* Overflow */ + info.si_code = FPE_FLTOVF; + break; + case 0x020: /* Precision */ + info.si_code = FPE_FLTRES; + break; + } + force_sig_info(SIGFPE, &info, task); +} + +asmlinkage void do_simd_coprocessor_error(struct pt_regs * regs, + long error_code) +{ + if (cpu_has_xmm) { + /* Handle SIMD FPU exceptions on PIII+ processors. */ + ignore_irq13 = 1; + simd_math_error((void *)regs->eip); + } else { + /* + * Handle strange cache flush from user space exception + * in all other cases. This is undocumented behaviour. + */ + if (regs->eflags & VM_MASK) { + handle_vm86_fault((struct kernel_vm86_regs *)regs, + error_code); + return; + } + die_if_kernel("cache flush denied", regs, error_code); + current->tss.trap_no = 19; + current->tss.error_code = error_code; + force_sig(SIGSEGV, current); + } } asmlinkage void do_spurious_interrupt_bug(struct pt_regs * regs, @@ -468,17 +586,13 @@ asmlinkage void math_state_restore(struct pt_regs regs) { __asm__ __volatile__("clts"); /* Allow maths ops (or we recurse) */ - if(current->used_math) - __asm__("frstor %0": :"m" (current->tss.i387)); - else - { - /* - * Our first FPU usage, clean the chip. 
- */ - __asm__("fninit"); - current->used_math = 1; + + if (current->used_math) { + restore_fpu(current); + } else { + init_fpu(); } - current->flags|=PF_USEDFPU; /* So we fnsave on switch_to() */ + current->flags |= PF_USEDFPU; /* So we fnsave on switch_to() */ } #ifndef CONFIG_MATH_EMULATION @@ -708,6 +822,7 @@ set_trap_gate(16,&coprocessor_error); set_trap_gate(17,&alignment_check); set_trap_gate(18,&machine_check); + set_trap_gate(19,&simd_coprocessor_error); set_system_gate(SYSCALL_VECTOR,&system_call); /* set up GDT task & ldt entries */ diff -urN linux.orig/arch/i386/mm/fault.c linux/arch/i386/mm/fault.c --- linux.orig/arch/i386/mm/fault.c Sun Mar 25 18:31:45 2001 +++ linux/arch/i386/mm/fault.c Wed Aug 7 17:57:18 2002 @@ -29,13 +29,13 @@ */ int __verify_write(const void * addr, unsigned long size) { - struct vm_area_struct * vma; + struct vm_area_struct * vma, * prev_vma; unsigned long start = (unsigned long) addr; if (!size) return 1; - vma = find_vma(current->mm, start); + vma = find_vma_prev(current->mm, start, &prev_vma); if (!vma) goto bad_area; if (vma->vm_start > start) @@ -75,7 +75,7 @@ check_stack: if (!(vma->vm_flags & VM_GROWSDOWN)) goto bad_area; - if (expand_stack(vma, start) == 0) + if (expand_stack(vma, start, prev_vma) == 0) goto good_area; bad_area: @@ -112,7 +112,7 @@ { struct task_struct *tsk; struct mm_struct *mm; - struct vm_area_struct * vma; + struct vm_area_struct * vma, * prev_vma; unsigned long address; unsigned long page; unsigned long fixup; @@ -133,7 +133,7 @@ down(&mm->mmap_sem); - vma = find_vma(mm, address); + vma = find_vma_prev(mm, address, &prev_vma); if (!vma) goto bad_area; if (vma->vm_start <= address) @@ -150,7 +150,7 @@ if (address + 32 < regs->esp) goto bad_area; } - if (expand_stack(vma, address)) + if (expand_stack(vma, address, prev_vma)) goto bad_area; /* * Ok, we have a good vm_area for this memory access, so diff -urN linux.orig/arch/i386/vmlinux.lds.S linux/arch/i386/vmlinux.lds.S --- linux.orig/arch/i386/vmlinux.lds.S Sun Mar 25 18:31:45 2001 +++ linux/arch/i386/vmlinux.lds.S Wed Aug 7 17:44:10 2002 @@ -1,6 +1,7 @@ /* ld script to make i386 Linux kernel * Written by Martin Mares ; */ +#include OUTPUT_FORMAT("elf32-i386", "elf32-i386", "elf32-i386") OUTPUT_ARCH(i386) ENTRY(_start) @@ -53,7 +54,7 @@ __init_end = .; - . = ALIGN(32); + . = ALIGN(L1_CACHE_BYTES); .data.cacheline_aligned : { *(.data.cacheline_aligned) } . = ALIGN(4096); diff -urN linux.orig/arch/ppc/kernel/time.c linux/arch/ppc/kernel/time.c --- linux.orig/arch/ppc/kernel/time.c Sun Mar 25 18:31:49 2001 +++ linux/arch/ppc/kernel/time.c Wed Aug 7 17:53:48 2002 @@ -147,6 +147,19 @@ hardirq_exit(cpu); } +static inline void +timeval_normalize(struct timeval * tv) +{ + time_t __sec; + + __sec = tv->tv_usec / 1000000; + if (__sec) + { + tv->tv_usec %= 1000000; + tv->tv_sec += __sec; + } +} + /* * This version of gettimeofday has microsecond resolution. 
*/ @@ -161,10 +174,7 @@ #ifndef __SMP__ tv->tv_usec += (decrementer_count - get_dec()) * count_period_num / count_period_den; - if (tv->tv_usec >= 1000000) { - tv->tv_usec -= 1000000; - tv->tv_sec++; - } + timeval_normalize(tv); #endif restore_flags(flags); } diff -urN linux.orig/arch/ppc/mm/fault.c linux/arch/ppc/mm/fault.c --- linux.orig/arch/ppc/mm/fault.c Sun Mar 25 18:31:48 2001 +++ linux/arch/ppc/mm/fault.c Wed Aug 7 17:57:18 2002 @@ -58,7 +58,7 @@ void do_page_fault(struct pt_regs *regs, unsigned long address, unsigned long error_code) { - struct vm_area_struct * vma; + struct vm_area_struct * vma, * prev_vma; struct mm_struct *mm = current->mm; int fault; @@ -92,14 +92,14 @@ } down(&mm->mmap_sem); - vma = find_vma(mm, address); + vma = find_vma_prev(mm, address, &prev_vma); if (!vma) goto bad_area; if (vma->vm_start <= address) goto good_area; if (!(vma->vm_flags & VM_GROWSDOWN)) goto bad_area; - if (expand_stack(vma, address)) + if (expand_stack(vma, address, prev_vma)) goto bad_area; good_area: diff -urN linux.orig/arch/s390/mm/fault.c linux/arch/s390/mm/fault.c --- linux.orig/arch/s390/mm/fault.c Fri Nov 2 17:39:06 2001 +++ linux/arch/s390/mm/fault.c Wed Aug 7 17:57:18 2002 @@ -125,7 +125,7 @@ goto good_area; if (!(vma->vm_flags & VM_GROWSDOWN)) goto bad_area; - if (expand_stack(vma, address)) + if (expand_stack(vma, address, NULL)) goto bad_area; /* * Ok, we have a good vm_area for this memory access, so diff -urN linux.orig/arch/sparc/mm/fault.c linux/arch/sparc/mm/fault.c --- linux.orig/arch/sparc/mm/fault.c Sun Mar 25 18:31:47 2001 +++ linux/arch/sparc/mm/fault.c Wed Aug 7 17:57:18 2002 @@ -222,7 +222,7 @@ goto good_area; if(!(vma->vm_flags & VM_GROWSDOWN)) goto bad_area; - if(expand_stack(vma, address)) + if(expand_stack(vma, address, NULL)) goto bad_area; /* * Ok, we have a good vm_area for this memory access, so @@ -414,7 +414,7 @@ goto good_area; if(!(vma->vm_flags & VM_GROWSDOWN)) goto bad_area; - if(expand_stack(vma, address)) + if(expand_stack(vma, address, NULL)) goto bad_area; good_area: if(write) { diff -urN linux.orig/arch/sparc64/mm/fault.c linux/arch/sparc64/mm/fault.c --- linux.orig/arch/sparc64/mm/fault.c Sun Mar 25 18:31:53 2001 +++ linux/arch/sparc64/mm/fault.c Wed Aug 7 17:57:18 2002 @@ -194,7 +194,7 @@ goto good_area; if(!(vma->vm_flags & VM_GROWSDOWN)) goto bad_area; - if(expand_stack(vma, address)) + if(expand_stack(vma, address, NULL)) goto bad_area; /* * Ok, we have a good vm_area for this memory access, so diff -urN linux.orig/arch/sparc64/solaris/timod.c linux/arch/sparc64/solaris/timod.c --- linux.orig/arch/sparc64/solaris/timod.c Sun Mar 25 18:31:53 2001 +++ linux/arch/sparc64/solaris/timod.c Wed Aug 7 17:45:22 2002 @@ -154,7 +154,7 @@ sock = ¤t->files->fd[fd]->f_dentry->d_inode->u.socket_i; wake_up_interruptible(&sock->wait); if (sock->fasync_list && !(sock->flags & SO_WAITDATA)) - kill_fasync(sock->fasync_list, SIGIO); + kill_fasync(sock->fasync_list, SIGIO, POLL_IN); SOLD("done"); } diff -urN linux.orig/drivers/block/ide-probe.c linux/drivers/block/ide-probe.c --- linux.orig/drivers/block/ide-probe.c Sun Mar 25 18:31:24 2001 +++ linux/drivers/block/ide-probe.c Wed Aug 7 17:50:35 2002 @@ -393,6 +393,7 @@ extern struct drive_info_struct drive_info; byte cmos_disks, *BIOS = (byte *) &drive_info; int unit; + extern spinlock_t rtc_lock; #ifdef CONFIG_BLK_DEV_PDC4030 if (hwif->chipset == ide_pdc4030 && hwif->channel != 0) diff -urN linux.orig/drivers/block/ll_rw_blk.c linux/drivers/block/ll_rw_blk.c --- linux.orig/drivers/block/ll_rw_blk.c Tue May 21 
01:32:34 2002 +++ linux/drivers/block/ll_rw_blk.c Wed Aug 7 17:52:39 2002 @@ -419,7 +419,8 @@ case COMPAQ_CISS_MAJOR+4: \ case COMPAQ_CISS_MAJOR+5: \ case COMPAQ_CISS_MAJOR+6: \ - case COMPAQ_CISS_MAJOR+7: + case COMPAQ_CISS_MAJOR+7: \ + case LOOP_MAJOR: #define elevator_starve_rest_of_queue(req) \ do { \ @@ -574,10 +575,9 @@ void make_request(int major, int rw, struct buffer_head * bh) { unsigned int sector, count; - struct request * req, * prev; + struct request * req, * prev, * freereq = NULL; int rw_ahead, max_req, max_sectors, max_segments; unsigned long flags; - int back, front; count = bh->b_size >> 9; sector = bh->b_rsector; @@ -654,6 +654,7 @@ max_sectors = get_max_sectors(bh->b_rdev); max_segments = get_max_segments(bh->b_rdev); + again: /* * Now we acquire the request spinlock, we have to be mega careful * not to schedule or do something nonatomic @@ -684,29 +685,23 @@ req = seek_to_not_starving_chunk(req); prev = NULL; - back = front = 0; do { if (req->cmd != rw) continue; if (req->rq_dev != bh->b_rdev) continue; - if (req->sector + req->nr_sectors == sector) - back = 1; - else if (req->sector - count == sector) - front = 1; - if (req->nr_sectors + count > max_sectors) continue; if (req->sem) continue; /* Can we add it to the end of this request? */ - if (back) { + if (req->sector + req->nr_sectors == sector) { if (req->bhtail->b_data + req->bhtail->b_size != bh->b_data) { if (req->nr_segments < max_segments) req->nr_segments++; - else break; + else continue; } req->bhtail->b_reqnext = bh; req->bhtail = bh; @@ -717,19 +712,19 @@ /* Can we now merge this req with the next? */ attempt_merge(req, max_sectors, max_segments); /* or to the beginning? */ - } else if (front) { + } else if (req->sector - count == sector) { /* * Check that we didn't seek on a starving request, * that could happen only at the first pass, thus * do that only if prev is NULL. */ if (!prev && ((req->cmd != READ && req->cmd != WRITE) || !req->elevator_latency)) - break; + continue; if (bh->b_data + bh->b_size != req->bh->b_data) { if (req->nr_segments < max_segments) req->nr_segments++; - else break; + else continue; } bh->b_reqnext = req->bh; req->bh = bh; @@ -749,15 +744,22 @@ continue; mark_buffer_clean(bh); + if (freereq) { + freereq->rq_status = RQ_INACTIVE; + wake_up(&wait_for_request); + } spin_unlock_irqrestore(&io_request_lock,flags); return; - } while (prev = req, - !front && !back && (req = req->next) != NULL); + } while (prev = req, (req = req->next) != NULL); } /* find an unused request. 
*/ - req = get_request(max_req, bh->b_rdev); + if (freereq) { + req = freereq; + freereq = NULL; + } else + req = get_request(max_req, bh->b_rdev); spin_unlock_irqrestore(&io_request_lock,flags); @@ -765,7 +767,8 @@ if (!req) { if (rw_ahead) goto end_io; - req = __get_request_wait(max_req, bh->b_rdev); + freereq = __get_request_wait(max_req, bh->b_rdev); + goto again; } /* fill up the request-info, and add it to the queue */ diff -urN linux.orig/drivers/block/loop.c linux/drivers/block/loop.c --- linux.orig/drivers/block/loop.c Sun Mar 25 18:31:24 2001 +++ linux/drivers/block/loop.c Wed Aug 7 17:52:39 2002 @@ -274,6 +274,8 @@ block++; } spin_lock_irq(&io_request_lock); + current_request->sector += current_request->current_nr_sectors; + current_request->nr_sectors -= current_request->current_nr_sectors; current_request->next=CURRENT; CURRENT=current_request; end_request(1); diff -urN linux.orig/drivers/char/adbmouse.c linux/drivers/char/adbmouse.c --- linux.orig/drivers/char/adbmouse.c Sun Mar 25 18:31:26 2001 +++ linux/drivers/char/adbmouse.c Wed Aug 7 17:45:22 2002 @@ -135,7 +135,7 @@ mouse.ready = 1; wake_up_interruptible(&mouse.wait); if (mouse.fasyncptr) - kill_fasync(mouse.fasyncptr, SIGIO); + kill_fasync(mouse.fasyncptr, SIGIO, POLL_IN); } static int fasync_mouse(int fd, struct file *filp, int on) diff -urN linux.orig/drivers/char/amigamouse.c linux/drivers/char/amigamouse.c --- linux.orig/drivers/char/amigamouse.c Sun Mar 25 18:31:26 2001 +++ linux/drivers/char/amigamouse.c Wed Aug 7 17:45:22 2002 @@ -154,7 +154,7 @@ mouse.dy = 2048; if (mouse.fasyncptr) - kill_fasync(mouse.fasyncptr, SIGIO); + kill_fasync(mouse.fasyncptr, SIGIO, POLL_IN); } AMI_MSE_INT_ON(); } diff -urN linux.orig/drivers/char/atarimouse.c linux/drivers/char/atarimouse.c --- linux.orig/drivers/char/atarimouse.c Sun Mar 25 18:31:26 2001 +++ linux/drivers/char/atarimouse.c Wed Aug 7 17:45:22 2002 @@ -49,7 +49,7 @@ mouse.ready = 1; wake_up_interruptible(&mouse.wait); if (mouse.fasyncptr) - kill_fasync(mouse.fasyncptr, SIGIO); + kill_fasync(mouse.fasyncptr, SIGIO, POLL_IN); /* ikbd_mouse_rel_pos(); */ } diff -urN linux.orig/drivers/char/atixlmouse.c linux/drivers/char/atixlmouse.c --- linux.orig/drivers/char/atixlmouse.c Sun Mar 25 18:31:24 2001 +++ linux/drivers/char/atixlmouse.c Wed Aug 7 17:45:22 2002 @@ -90,7 +90,7 @@ mouse.ready = 1; wake_up_interruptible(&mouse.wait); if (mouse.fasync) - kill_fasync(mouse.fasync, SIGIO); + kill_fasync(mouse.fasync, SIGIO, POLL_IN); } ATIXL_MSE_ENABLE_UPDATE(); } diff -urN linux.orig/drivers/char/busmouse.c linux/drivers/char/busmouse.c --- linux.orig/drivers/char/busmouse.c Sun Mar 25 18:31:24 2001 +++ linux/drivers/char/busmouse.c Wed Aug 7 17:45:22 2002 @@ -105,7 +105,7 @@ mouse.dy = 2048; if (mouse.fasyncptr) - kill_fasync(mouse.fasyncptr, SIGIO); + kill_fasync(mouse.fasyncptr, SIGIO, POLL_IN); } MSE_INT_ON(); } diff -urN linux.orig/drivers/char/dn_keyb.c linux/drivers/char/dn_keyb.c --- linux.orig/drivers/char/dn_keyb.c Sun Mar 25 18:31:26 2001 +++ linux/drivers/char/dn_keyb.c Wed Aug 7 17:45:22 2002 @@ -468,7 +468,7 @@ if (mouse_dy > 2048) mouse_dy = 2048; if (mouse_fasyncptr) - kill_fasync(mouse_fasyncptr, SIGIO); + kill_fasync(mouse_fasyncptr, SIGIO, POLL_IN); } mouse_byte_count=0; /* printk("mouse: %d, %d, %x\n",mouse_x,mouse_y,buttons); */ diff -urN linux.orig/drivers/char/drm/fops.c linux/drivers/char/drm/fops.c --- linux.orig/drivers/char/drm/fops.c Fri Nov 2 17:39:06 2001 +++ linux/drivers/char/drm/fops.c Wed Aug 7 17:45:22 2002 @@ -219,7 +219,7 @@ } #if 
LINUX_VERSION_CODE < 0x020400 - if (dev->buf_async) kill_fasync(dev->buf_async, SIGIO); + if (dev->buf_async) kill_fasync(dev->buf_async, SIGIO, POLL_IN); #else /* Type of first parameter changed in Linux 2.4.0-test2... */ diff -urN linux.orig/drivers/char/msbusmouse.c linux/drivers/char/msbusmouse.c --- linux.orig/drivers/char/msbusmouse.c Sun Mar 25 18:31:24 2001 +++ linux/drivers/char/msbusmouse.c Wed Aug 7 17:45:22 2002 @@ -89,7 +89,7 @@ mouse.ready = 1; wake_up_interruptible(&mouse.wait); if (mouse.fasyncptr) - kill_fasync(mouse.fasyncptr, SIGIO); + kill_fasync(mouse.fasyncptr, SIGIO, POLL_IN); } } diff -urN linux.orig/drivers/char/n_hdlc.c linux/drivers/char/n_hdlc.c --- linux.orig/drivers/char/n_hdlc.c Fri Nov 2 17:39:06 2001 +++ linux/drivers/char/n_hdlc.c Wed Aug 7 17:45:22 2002 @@ -584,11 +584,7 @@ wake_up_interruptible (&n_hdlc->read_wait); wake_up_interruptible (&n_hdlc->poll_wait); if (n_hdlc->tty->fasync != NULL) -#if LINUX_VERSION_CODE >= VERSION(2,2,14) && defined(__rh_config_h__) kill_fasync (n_hdlc->tty->fasync, SIGIO, POLL_IN); -#else - kill_fasync (n_hdlc->tty->fasync, SIGIO); -#endif } /* end of n_hdlc_tty_receive() */ /* n_hdlc_tty_read() diff -urN linux.orig/drivers/char/n_tty.c linux/drivers/char/n_tty.c --- linux.orig/drivers/char/n_tty.c Sun Mar 25 18:31:25 2001 +++ linux/drivers/char/n_tty.c Wed Aug 7 17:45:23 2002 @@ -635,7 +635,7 @@ tty->canon_head = tty->read_head; tty->canon_data++; if (tty->fasync) - kill_fasync(tty->fasync, SIGIO); + kill_fasync(tty->fasync, SIGIO, POLL_IN); if (tty->read_wait || tty->poll_wait) { wake_up_interruptible(&tty->read_wait); @@ -743,7 +743,7 @@ if (!tty->icanon && (tty->read_cnt >= tty->minimum_to_wake)) { if (tty->fasync) - kill_fasync(tty->fasync, SIGIO); + kill_fasync(tty->fasync, SIGIO, POLL_IN); if (tty->read_wait||tty->poll_wait) { wake_up_interruptible(&tty->read_wait); diff -urN linux.orig/drivers/char/pc110pad.c linux/drivers/char/pc110pad.c --- linux.orig/drivers/char/pc110pad.c Fri Nov 2 17:39:06 2001 +++ linux/drivers/char/pc110pad.c Wed Aug 7 17:45:23 2002 @@ -75,7 +75,7 @@ { wake_up_interruptible(&queue); if(asyncptr) - kill_fasync(asyncptr, SIGIO); + kill_fasync(asyncptr, SIGIO, POLL_IN); } diff -urN linux.orig/drivers/char/pc_keyb.c linux/drivers/char/pc_keyb.c --- linux.orig/drivers/char/pc_keyb.c Sun Mar 25 18:31:26 2001 +++ linux/drivers/char/pc_keyb.c Wed Aug 7 17:45:23 2002 @@ -424,7 +424,7 @@ if (head != queue->tail) { queue->head = head; if (queue->fasync) - kill_fasync(queue->fasync, SIGIO); + kill_fasync(queue->fasync, SIGIO, POLL_IN); wake_up_interruptible(&queue->proc_list); } } diff -urN linux.orig/drivers/char/qpmouse.c linux/drivers/char/qpmouse.c --- linux.orig/drivers/char/qpmouse.c Sun Mar 25 18:31:26 2001 +++ linux/drivers/char/qpmouse.c Wed Aug 7 17:45:23 2002 @@ -134,7 +134,7 @@ } queue->head = head; if (queue->fasync) - kill_fasync(queue->fasync, SIGIO); + kill_fasync(queue->fasync, SIGIO, POLL_IN); wake_up_interruptible(&queue->proc_list); } diff -urN linux.orig/drivers/i2o/i2o_config.c linux/drivers/i2o/i2o_config.c --- linux.orig/drivers/i2o/i2o_config.c Sun Mar 25 18:31:44 2001 +++ linux/drivers/i2o/i2o_config.c Wed Aug 7 17:45:23 2002 @@ -164,7 +164,7 @@ // inf->fp, inf->q_id, inf->q_len); if(inf->fasync) - kill_fasync(inf->fasync, SIGIO); + kill_fasync(inf->fasync, SIGIO, POLL_IN); } return; diff -urN linux.orig/drivers/net/ppp.c linux/drivers/net/ppp.c --- linux.orig/drivers/net/ppp.c Sun Mar 25 18:31:15 2001 +++ linux/drivers/net/ppp.c Wed Aug 7 17:45:23 2002 @@ -2382,7 +2382,7 @@ 
wake_up_interruptible (&ppp->read_wait); if (ppp->tty->fasync != NULL) - kill_fasync (ppp->tty->fasync, SIGIO); + kill_fasync (ppp->tty->fasync, SIGIO, POLL_IN); return 1; } diff -urN linux.orig/drivers/sbus/char/pcikbd.c linux/drivers/sbus/char/pcikbd.c --- linux.orig/drivers/sbus/char/pcikbd.c Fri Nov 2 17:39:07 2001 +++ linux/drivers/sbus/char/pcikbd.c Wed Aug 7 17:45:23 2002 @@ -874,7 +874,7 @@ queue->head = head; aux_ready = 1; if (queue->fasync) - kill_fasync(queue->fasync, SIGIO); + kill_fasync(queue->fasync, SIGIO, POLL_IN); wake_up_interruptible(&queue->proc_list); } diff -urN linux.orig/drivers/sbus/char/sunkbd.c linux/drivers/sbus/char/sunkbd.c --- linux.orig/drivers/sbus/char/sunkbd.c Sun Mar 25 18:31:38 2001 +++ linux/drivers/sbus/char/sunkbd.c Wed Aug 7 17:45:23 2002 @@ -1278,7 +1278,7 @@ kbd_head = next; } if (kb_fasync) - kill_fasync (kb_fasync, SIGIO); + kill_fasync (kb_fasync, SIGIO, POLL_IN); wake_up_interruptible (&kbd_wait); } diff -urN linux.orig/drivers/sbus/char/sunmouse.c linux/drivers/sbus/char/sunmouse.c --- linux.orig/drivers/sbus/char/sunmouse.c Sun Mar 25 18:31:39 2001 +++ linux/drivers/sbus/char/sunmouse.c Wed Aug 7 17:45:23 2002 @@ -137,7 +137,7 @@ } sunmouse.ready = 1; if (sunmouse.fasync) - kill_fasync (sunmouse.fasync, SIGIO); + kill_fasync (sunmouse.fasync, SIGIO, POLL_IN); wake_up_interruptible (&sunmouse.proc_list); } @@ -365,7 +365,7 @@ */ sunmouse.ready = 1; if (sunmouse.fasync) - kill_fasync (sunmouse.fasync, SIGIO); + kill_fasync (sunmouse.fasync, SIGIO, POLL_IN); wake_up_interruptible(&sunmouse.proc_list); } return; diff -urN linux.orig/drivers/scsi/sg.c linux/drivers/scsi/sg.c --- linux.orig/drivers/scsi/sg.c Tue May 21 01:32:35 2002 +++ linux/drivers/scsi/sg.c Wed Aug 7 17:45:23 2002 @@ -831,7 +831,7 @@ if (sfp && srp) { wake_up_interruptible(&sfp->read_wait); if (sfp->async_qp) - kill_fasync(sfp->async_qp, SIGPOLL); + kill_fasync(sfp->async_qp, SIGIO, POLL_IN); } } diff -urN linux.orig/drivers/sgi/char/shmiq.c linux/drivers/sgi/char/shmiq.c --- linux.orig/drivers/sgi/char/shmiq.c Sun Mar 25 18:31:42 2001 +++ linux/drivers/sgi/char/shmiq.c Wed Aug 7 17:45:23 2002 @@ -118,7 +118,7 @@ s->tail = tail_next; shmiqs [device].tail = tail_next; if (shmiqs [device].fasync) - kill_fasync (shmiqs [device].fasync, SIGIO); + kill_fasync (shmiqs [device].fasync, SIGIO, POLL_IN); wake_up_interruptible (&shmiqs [device].proc_list); } diff -urN linux.orig/drivers/telephony/ixj.c linux/drivers/telephony/ixj.c --- linux.orig/drivers/telephony/ixj.c Sun Mar 25 18:31:44 2001 +++ linux/drivers/telephony/ixj.c Wed Aug 7 17:45:23 2002 @@ -536,7 +536,7 @@ extern __inline__ void ixj_kill_fasync(int board) { if (ixj[board].async_queue) - kill_fasync(ixj[board].async_queue, SIGIO); // Send apps notice of change + kill_fasync(ixj[board].async_queue, SIGIO, POLL_IN); // Send apps notice of change } static void ixj_timeout(unsigned long ptr) diff -urN linux.orig/drivers/usb/evdev.c linux/drivers/usb/evdev.c --- linux.orig/drivers/usb/evdev.c Sun Mar 25 18:31:43 2001 +++ linux/drivers/usb/evdev.c Wed Aug 7 17:45:23 2002 @@ -74,7 +74,7 @@ list->buffer[list->head].value = value; list->head = (list->head + 1) & (EVDEV_BUFFER_SIZE - 1); - kill_fasync(list->fasync, SIGIO); + kill_fasync(list->fasync, SIGIO, POLL_IN); list = list->next; } diff -urN linux.orig/drivers/usb/joydev.c linux/drivers/usb/joydev.c --- linux.orig/drivers/usb/joydev.c Sun Mar 25 18:31:43 2001 +++ linux/drivers/usb/joydev.c Wed Aug 7 17:45:23 2002 @@ -143,7 +143,7 @@ if (list->tail == (list->head = (list->head 
+ 1) & (JOYDEV_BUFFER_SIZE - 1))) list->startup = 0; - kill_fasync(list->fasync, SIGIO); + kill_fasync(list->fasync, SIGIO, POLL_IN); list = list->next; } diff -urN linux.orig/drivers/usb/mousedev.c linux/drivers/usb/mousedev.c --- linux.orig/drivers/usb/mousedev.c Sun Mar 25 18:31:42 2001 +++ linux/drivers/usb/mousedev.c Wed Aug 7 17:45:23 2002 @@ -143,7 +143,7 @@ list->ready = 1; - kill_fasync(list->fasync, SIGIO); + kill_fasync(list->fasync, SIGIO, POLL_IN); list = list->next; } @@ -321,7 +321,7 @@ list->buffer = list->bufsiz; } - kill_fasync(list->fasync, SIGIO); + kill_fasync(list->fasync, SIGIO, POLL_IN); wake_up_interruptible(&list->mousedev->wait); diff -urN linux.orig/fs/buffer.c linux/fs/buffer.c --- linux.orig/fs/buffer.c Sun Mar 25 18:37:38 2001 +++ linux/fs/buffer.c Wed Aug 7 17:52:06 2002 @@ -123,7 +123,7 @@ /* These are the min and max parameter values that we will allow to be assigned */ int bdflush_min[N_PARAM] = { 0, 10, 5, 25, 0, 1*HZ, 1*HZ, 1, 1}; -int bdflush_max[N_PARAM] = {100,5000, 2000, 2000,60*HZ, 600*HZ, 600*HZ, 2047, 5}; +int bdflush_max[N_PARAM] = {100,5000, 2000, 2000,INT_MAX, 600*HZ, 600*HZ, 2047, 5}; void wakeup_bdflush(int); @@ -144,13 +144,13 @@ bh->b_count++; wait.task = tsk; add_wait_queue(&bh->b_wait, &wait); -repeat: - tsk->state = TASK_UNINTERRUPTIBLE; - run_task_queue(&tq_disk); - if (buffer_locked(bh)) { + do { + set_current_state(TASK_UNINTERRUPTIBLE); + run_task_queue(&tq_disk); + if (!buffer_locked(bh)) + break; schedule(); - goto repeat; - } + } while (buffer_locked(bh)); tsk->state = TASK_RUNNING; remove_wait_queue(&bh->b_wait, &wait); bh->b_count--; @@ -1530,9 +1530,13 @@ struct buffer_head *p = tmp; tmp = tmp->b_this_page; - if (buffer_dirty(p)) - if (test_and_set_bit(BH_Wait_IO, &p->b_state)) - ll_rw_block(WRITE, 1, &p); + if (buffer_dirty(p) || buffer_locked(p)) + if (test_and_set_bit(BH_Wait_IO, &p->b_state)) { + if (buffer_dirty(p)) + ll_rw_block(WRITE, 1, &p); + else if (buffer_locked(p)) + wait_on_buffer(p); + } } while (tmp != bh); /* Restore the visibility of the page before returning. 
*/ @@ -1788,7 +1792,6 @@ if (ncount) printk("sync_old_buffers: %d dirty buffers not on dirty list\n", ncount); printk("Wrote %d/%d buffers\n", nwritten, ndirty); #endif - run_task_queue(&tq_disk); return 0; } @@ -1986,13 +1989,18 @@ tsk->session = 1; tsk->pgrp = 1; strcpy(tsk->comm, "kupdate"); + + /* sigstop and sigcont will stop and wakeup kupdate */ + spin_lock_irq(&tsk->sigmask_lock); sigfillset(&tsk->blocked); - /* sigcont will wakeup kupdate after setting interval to 0 */ sigdelset(&tsk->blocked, SIGCONT); + sigdelset(&tsk->blocked, SIGSTOP); + spin_unlock_irq(&tsk->sigmask_lock); lock_kernel(); for (;;) { + /* update interval */ interval = bdf_prm.b_un.interval; if (interval) { @@ -2001,9 +2009,25 @@ } else { + stop_kupdate: tsk->state = TASK_STOPPED; schedule(); /* wait for SIGCONT */ } + /* check for sigstop */ + if (signal_pending(tsk)) + { + int stopped = 0; + spin_lock_irq(&tsk->sigmask_lock); + if (sigismember(&tsk->signal, SIGSTOP)) + { + sigdelset(&tsk->signal, SIGSTOP); + stopped = 1; + } + recalc_sigpending(tsk); + spin_unlock_irq(&tsk->sigmask_lock); + if (stopped) + goto stop_kupdate; + } #ifdef DEBUG printk("kupdate() activated...\n"); #endif diff -urN linux.orig/fs/ext2/inode.c linux/fs/ext2/inode.c --- linux.orig/fs/ext2/inode.c Sun Mar 25 18:30:58 2001 +++ linux/fs/ext2/inode.c Wed Aug 7 17:54:11 2002 @@ -739,7 +739,7 @@ } #if BITS_PER_LONG == 64 - if (size >> 33) { + if (size >> 31) { struct super_block *sb = inode->i_sb; struct ext2_super_block *es = sb->u.ext2_sb.s_es; if (!(es->s_feature_ro_compat & diff -urN linux.orig/fs/fcntl.c linux/fs/fcntl.c --- linux.orig/fs/fcntl.c Sun Mar 25 18:30:58 2001 +++ linux/fs/fcntl.c Wed Aug 7 17:45:23 2002 @@ -8,6 +8,8 @@ #include #include +#include +#include #include extern int sock_fcntl (struct file *, unsigned int cmd, unsigned long arg); @@ -223,7 +225,19 @@ return err; } -static void send_sigio(struct fown_struct *fown, struct fasync_struct *fa) +/* Table to convert sigio signal codes into poll band bitmaps */ + +static int band_table[NSIGPOLL] = { + POLLIN | POLLRDNORM, /* POLL_IN */ + POLLOUT | POLLWRNORM | POLLWRBAND, /* POLL_OUT */ + POLLIN | POLLRDNORM | POLLMSG, /* POLL_MSG */ + POLLERR, /* POLL_ERR */ + POLLPRI | POLLRDBAND, /* POLL_PRI */ + POLLHUP | POLLERR /* POLL_HUP */ +}; + +static void send_sigio(struct fown_struct *fown, struct fasync_struct *fa, + int reason) { struct task_struct * p; int pid = fown->pid; @@ -252,9 +266,12 @@ back to SIGIO in that case. --sct */ si.si_signo = fown->signum; si.si_errno = 0; - si.si_code = SI_SIGIO; - si.si_pid = pid; - si.si_uid = uid; + si.si_code = reason; + if (reason - POLL_IN >= NSIGPOLL || + reason <= 0) + panic("send_sigio got `reason' != POLL_*"); + else + si.si_band = band_table[reason - POLL_IN]; si.si_fd = fa->fa_fd; if (!send_sig_info(fown->signum, &si, p)) break; @@ -266,7 +283,7 @@ read_unlock(&tasklist_lock); } -void kill_fasync(struct fasync_struct *fa, int sig) +void kill_fasync(struct fasync_struct *fa, int sig, int band) { while (fa) { struct fown_struct * fown; @@ -276,8 +293,11 @@ return; } fown = &fa->fa_file->f_owner; - if (fown->pid) - send_sigio(fown, fa); + /* Don't send SIGURG to processes which have not set a + queued signum: SIGURG has its own default signalling + mechanism. 
*/ + if (fown->pid && !(sig == SIGURG && fown->signum == 0)) + send_sigio(fown, fa, band); fa = fa->fa_next; } } diff -urN linux.orig/fs/inode.c linux/fs/inode.c --- linux.orig/fs/inode.c Sun Mar 25 18:37:38 2001 +++ linux/fs/inode.c Wed Aug 7 17:57:41 2002 @@ -11,6 +11,8 @@ #include #include #include +#include +#include /* * New inode.c implementation. @@ -29,9 +31,8 @@ * Inode lookup is no longer as critical as it used to be: * most of the lookups are going to be through the dcache. */ -#define HASH_BITS 8 -#define HASH_SIZE (1UL << HASH_BITS) -#define HASH_MASK (HASH_SIZE-1) +#define HASH_BITS i_hash_bits +#define HASH_MASK i_hash_mask /* * Each inode can be on two separate lists. One is @@ -47,7 +48,9 @@ LIST_HEAD(inode_in_use); static LIST_HEAD(inode_unused); -static struct list_head inode_hashtable[HASH_SIZE]; +static unsigned int i_hash_bits; +static unsigned int i_hash_mask; +static struct list_head *inode_hashtable; __u32 inode_generation_count = 0; @@ -569,6 +572,7 @@ inode->i_writecount = 0; inode->i_size = 0; inode->i_generation = 0; + inode->i_blocks = 0; memset(&inode->i_dquot, 0, sizeof(inode->i_dquot)); sema_init(&inode->i_sem, 1); } @@ -680,7 +684,7 @@ static inline unsigned long hash(struct super_block *sb, unsigned long i_ino) { - unsigned long tmp = i_ino | (unsigned long) sb; + unsigned long tmp = i_ino + (unsigned long) sb / (sizeof(struct super_block) & ~(sizeof(struct super_block) - 1)); tmp = tmp + (tmp >> HASH_BITS); return tmp & HASH_MASK; } @@ -835,30 +839,85 @@ return 0; } -/* - * Initialize the hash tables and default - * value for max inodes - */ -#define MAX_INODE (16384) - void __init inode_init(void) { - int i, max; - struct list_head *head = inode_hashtable; + int i, order; + struct list_head *d; + unsigned long nr_hash, hash_size, tmp; + +#ifndef CONFIG_BIGMEM + nr_hash = num_physpages; +#else + nr_hash = bigmem_mapnr; +#endif + nr_hash <<= PAGE_SHIFT; + nr_hash >>= 13; + + /* scale logaritmically over 32768 inodes */ + if (nr_hash > 16384) { + if (nr_hash > 32768) + nr_hash >>= 1; + else + nr_hash = 16384; + } + if (nr_hash > 32768) { + if (nr_hash > 65536) + nr_hash >>= 1; + else + nr_hash = 32768; + } + + /* This limit triggers with more than 1G of RAM */ + if (nr_hash > 65536) + nr_hash = 65536; + + max_inodes = nr_hash; + + hash_size = nr_hash * sizeof(struct list_head); + + if (hash_size < PAGE_SIZE) { + /* Embedded systems */ + inode_hashtable = kmalloc(hash_size, GFP_ATOMIC); + + i_hash_mask = (nr_hash - 1); - i = HASH_SIZE; + tmp = nr_hash; + i_hash_bits = 0; + while((tmp >>= 1UL) != 0UL) + i_hash_bits++; + } else { + for (order = 0; ((1UL << order) << PAGE_SHIFT) < hash_size; + order++); + + do { + hash_size = 1UL << (order+PAGE_SHIFT); + nr_hash = hash_size / sizeof(struct list_head); + + i_hash_mask = (nr_hash - 1); + + tmp = nr_hash; + i_hash_bits = 0; + while((tmp >>= 1UL) != 0UL) + i_hash_bits++; + + inode_hashtable = (struct list_head *) __get_free_pages(GFP_ATOMIC, order); + } while(inode_hashtable == NULL && --order >= 0); + } + + printk("Inode hash table entries: %lu (%ldk), inode-max: %d\n", + nr_hash, hash_size >> 10, max_inodes); + + if (!inode_hashtable) + panic("Failed to allocate inode hash table\n"); + + d = inode_hashtable; + i = nr_hash; do { - INIT_LIST_HEAD(head); - head++; + INIT_LIST_HEAD(d); + d++; i--; } while (i); - /* Initial guess at reasonable inode number */ - max = num_physpages >> 1; - if (max > MAX_INODE) - max = MAX_INODE; - max_inodes = max; - /* Get a random number. 
*/ get_random_bytes (&inode_generation_count, sizeof (inode_generation_count)); diff -urN linux.orig/fs/proc/fd.c linux/fs/proc/fd.c --- linux.orig/fs/proc/fd.c Sun Mar 25 18:30:58 2001 +++ linux/fs/proc/fd.c Wed Aug 7 17:51:29 2002 @@ -87,7 +87,6 @@ fd = 0; len = dentry->d_name.len; name = dentry->d_name.name; - if (len > 1 && *name == '0') goto out; while (len-- > 0) { c = *name - '0'; name++; diff -urN linux.orig/fs/proc/root.c linux/fs/proc/root.c --- linux.orig/fs/proc/root.c Tue May 21 01:32:35 2002 +++ linux/fs/proc/root.c Wed Aug 7 17:51:29 2002 @@ -844,7 +844,6 @@ } pid *= 10; pid += c; - if (!pid) break; if (pid & 0xffff0000) { pid = 0; break; diff -urN linux.orig/fs/select.c linux/fs/select.c --- linux.orig/fs/select.c Sun Mar 25 18:30:58 2001 +++ linux/fs/select.c Wed Aug 7 17:55:44 2002 @@ -412,7 +412,7 @@ lock_kernel(); /* Do a sanity check on nfds ... */ err = -EINVAL; - if (nfds > current->files->max_fds) + if (nfds > current->rlim[RLIMIT_NOFILE].rlim_cur) goto out; if (timeout) { diff -urN linux.orig/fs/smbfs/sock.c linux/fs/smbfs/sock.c --- linux.orig/fs/smbfs/sock.c Sun Mar 25 18:30:59 2001 +++ linux/fs/smbfs/sock.c Wed Aug 7 17:45:23 2002 @@ -96,7 +96,7 @@ */ if(!sk->dead) { wake_up_interruptible(sk->sleep); - sock_wake_async(sk->socket,1); + sock_wake_async(sk->socket,1,POLL_IN); } } diff -urN linux.orig/include/asm-i386/bugs.h linux/include/asm-i386/bugs.h --- linux.orig/include/asm-i386/bugs.h Sun Mar 25 18:37:39 2001 +++ linux/include/asm-i386/bugs.h Wed Aug 7 17:44:43 2002 @@ -8,6 +8,9 @@ * * - Channing Corn (tests & fixes), * - Andrew D. Balsa (code cleanup). + * + * Pentium III FXSR, SSE support + * Gareth Hughes , May 2000 */ /* @@ -20,6 +23,7 @@ #include #include #include +#include #define CONFIG_BUGi386 @@ -69,6 +73,25 @@ #endif return; } + + /* + * Verify that the FXSAVE/FXRSTOR data will be 16-byte aligned. + */ + if (offsetof(struct task_struct, tss.i387.fxsave) & 15) { + extern void __buggy_fxsr_alignment(void); + __buggy_fxsr_alignment(); + } + if (cpu_has_fxsr) { + printk(KERN_INFO "Enabling fast FPU save and restore... "); + set_in_cr4(X86_CR4_OSFXSR); + printk("done.\n"); + } + if (cpu_has_xmm) { + printk(KERN_INFO "Enabling unmasked SIMD FPU exception support... "); + set_in_cr4(X86_CR4_OSXMMEXCPT); + printk("done.\n"); + } + if (mca_pentium_flag) { /* The IBM Model 95 machines with pentiums lock up on * fpu test, so we avoid it. All pentiums have inbuilt diff -urN linux.orig/include/asm-i386/cache.h linux/include/asm-i386/cache.h --- linux.orig/include/asm-i386/cache.h Sun Mar 25 18:31:05 2001 +++ linux/include/asm-i386/cache.h Wed Aug 7 17:44:10 2002 @@ -5,7 +5,11 @@ #define __ARCH_I386_CACHE_H /* bytes per L1 cache line */ -#if CPU==586 || CPU==686 +#ifdef CONFIG_M686_L1_64 +#define L1_CACHE_BYTES 64 +#elif defined(CONFIG_M686_L1_128) +#define L1_CACHE_BYTES 128 +#elif CPU==586 || CPU==686 #define L1_CACHE_BYTES 32 #else #define L1_CACHE_BYTES 16 diff -urN linux.orig/include/asm-i386/elf.h linux/include/asm-i386/elf.h --- linux.orig/include/asm-i386/elf.h Sun Mar 25 18:31:05 2001 +++ linux/include/asm-i386/elf.h Wed Aug 7 17:44:43 2002 @@ -15,6 +15,7 @@ typedef elf_greg_t elf_gregset_t[ELF_NGREG]; typedef struct user_i387_struct elf_fpregset_t; +typedef struct user_fxsr_struct elf_fpxregset_t; /* * This is used to ensure we don't load something for the wrong architecture. 
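The bugs.h hunk above guards the new FXSR support with a link-time assertion: if the fxsave area inside the task structure is not 16-byte aligned, the call to the deliberately undefined __buggy_fxsr_alignment() survives into the object file and the kernel fails to link, instead of silently corrupting FPU state at run time. Below is a minimal stand-alone sketch of that idiom; struct demo and __buggy_buf_alignment() are made-up names for illustration and are not part of the patch.

/*
 * Sketch of the link-time assertion idiom used in the bugs.h hunk above.
 * struct demo and __buggy_buf_alignment() are illustrative names only.
 */
#include <stddef.h>

struct demo {
	long pad[2];
	char buf[16] __attribute__ ((aligned (16)));	/* must sit on a 16-byte boundary */
};

extern void __buggy_buf_alignment(void);	/* intentionally never defined anywhere */

void check_buf_alignment(void)
{
	/*
	 * offsetof() folds to a constant, so when the layout is correct the
	 * branch is discarded at compile time (with optimisation, as the
	 * kernel is built) and the undefined symbol is never referenced;
	 * a misaligned layout leaves the call in and the final link fails.
	 */
	if (offsetof(struct demo, buf) & 15)
		__buggy_buf_alignment();
}

int main(void)
{
	check_buf_alignment();
	return 0;
}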
diff -urN linux.orig/include/asm-i386/i387.h linux/include/asm-i386/i387.h --- linux.orig/include/asm-i386/i387.h Thu Jan 1 01:00:00 1970 +++ linux/include/asm-i386/i387.h Wed Aug 7 17:44:43 2002 @@ -0,0 +1,83 @@ +/* + * include/asm-i386/i387.h + * + * Copyright (C) 1994 Linus Torvalds + * + * Pentium III FXSR, SSE support + * General FPU state handling cleanups + * Gareth Hughes , May 2000 + */ + +#ifndef __ASM_I386_I387_H +#define __ASM_I386_I387_H + +#include +#include +#include +#include + +extern void init_fpu(void); +/* + * FPU lazy state save handling... + */ +extern void save_init_fpu( struct task_struct *tsk ); +extern void restore_fpu( struct task_struct *tsk ); + +#define unlazy_fpu( tsk ) do { \ + if ( tsk->flags & PF_USEDFPU ) \ + save_init_fpu( tsk ); \ +} while (0) + +#define clear_fpu( tsk ) do { \ + if ( tsk->flags & PF_USEDFPU ) { \ + tsk->flags &= ~PF_USEDFPU; \ + stts(); \ + } \ +} while (0) + +/* + * FPU state interaction... + */ +extern unsigned short get_fpu_cwd( struct task_struct *tsk ); +extern unsigned short get_fpu_swd( struct task_struct *tsk ); +extern unsigned short get_fpu_twd( struct task_struct *tsk ); +extern unsigned short get_fpu_mxcsr( struct task_struct *tsk ); + +extern void set_fpu_cwd( struct task_struct *tsk, unsigned short cwd ); +extern void set_fpu_swd( struct task_struct *tsk, unsigned short swd ); +extern void set_fpu_twd( struct task_struct *tsk, unsigned short twd ); +extern void set_fpu_mxcsr( struct task_struct *tsk, unsigned short mxcsr ); + +#define load_mxcsr( val ) do { \ + unsigned long __mxcsr = ((unsigned long)(val) & 0xffbf); \ + asm volatile( "ldmxcsr %0" : : "m" (__mxcsr) ); \ +} while (0) + +/* + * Signal frame handlers... + */ +extern int save_i387( struct _fpstate *buf ); +extern int restore_i387( struct _fpstate *buf ); + +/* + * ptrace request handers... + */ +extern int get_fpregs( struct user_i387_struct *buf, + struct task_struct *tsk ); +extern int set_fpregs( struct task_struct *tsk, + struct user_i387_struct *buf ); + +extern int get_fpxregs( struct user_fxsr_struct *buf, + struct task_struct *tsk ); +extern int set_fpxregs( struct task_struct *tsk, + struct user_fxsr_struct *buf ); + +/* + * FPU state for core dumps... 
+ */ +extern int dump_fpu( struct pt_regs *regs, + struct user_i387_struct *fpu ); +extern int dump_extended_fpu( struct pt_regs *regs, + struct user_fxsr_struct *fpu ); + +#endif /* __ASM_I386_I387_H */ diff -urN linux.orig/include/asm-i386/processor.h linux/include/asm-i386/processor.h --- linux.orig/include/asm-i386/processor.h Tue May 21 01:32:35 2002 +++ linux/include/asm-i386/processor.h Wed Aug 7 17:44:43 2002 @@ -40,7 +40,7 @@ unsigned long *pgd_quick; unsigned long *pte_quick; unsigned long pgtable_cache_sz; -}; +} __attribute__((__aligned__(SMP_CACHE_BYTES))); #define X86_VENDOR_INTEL 0 #define X86_VENDOR_CYRIX 1 @@ -82,7 +82,7 @@ #define X86_FEATURE_22 0x00400000 #define X86_FEATURE_MMX 0x00800000 /* multimedia extensions */ #define X86_FEATURE_FXSR 0x01000000 /* FXSAVE and FXRSTOR instructions (fast save and restore of FPU context), and CR4.OSFXSR (OS uses these instructions) available */ -#define X86_FEATURE_25 0x02000000 +#define X86_FEATURE_XMM 0x02000000 /* Intel MMX2 instruction set */ #define X86_FEATURE_26 0x04000000 #define X86_FEATURE_27 0x08000000 #define X86_FEATURE_28 0x10000000 @@ -100,6 +100,23 @@ #define current_cpu_data boot_cpu_data #endif +#define cpu_has_pge \ + (boot_cpu_data.x86_capability & X86_FEATURE_PGE) +#define cpu_has_pse \ + (boot_cpu_data.x86_capability & X86_FEATURE_PSE) +#define cpu_has_pae \ + (boot_cpu_data.x86_capability & X86_FEATURE_PAE) +#define cpu_has_tsc \ + (boot_cpu_data.x86_capability & X86_FEATURE_TSC) +#define cpu_has_de \ + (boot_cpu_data.x86_capability & X86_FEATURE_DE) +#define cpu_has_vme \ + (boot_cpu_data.x86_capability & X86_FEATURE_VME) +#define cpu_has_fxsr \ + ((boot_cpu_data.x86_vendor != X86_VENDOR_CYRIX) && (boot_cpu_data.x86_capability & X86_FEATURE_FXSR)) +#define cpu_has_xmm \ + (boot_cpu_data.x86_capability & X86_FEATURE_XMM) + extern char ignore_irq13; extern void identify_cpu(struct cpuinfo_x86 *); @@ -184,6 +201,49 @@ return edx; } /* + * Intel CPU features in CR4 + */ +#define X86_CR4_VME 0x0001 /* enable vm86 extensions */ +#define X86_CR4_PVI 0x0002 /* virtual interrupts flag enable */ +#define X86_CR4_TSD 0x0004 /* disable time stamp at ipl 3 */ +#define X86_CR4_DE 0x0008 /* enable debugging extensions */ +#define X86_CR4_PSE 0x0010 /* enable page size extensions */ +#define X86_CR4_PAE 0x0020 /* enable physical address extensions */ +#define X86_CR4_MCE 0x0040 /* Machine check enable */ +#define X86_CR4_PGE 0x0080 /* enable global pages */ +#define X86_CR4_PCE 0x0100 /* enable performance counters at ipl 3 */ +#define X86_CR4_OSFXSR 0x0200 /* enable fast FPU save and restore */ +#define X86_CR4_OSXMMEXCPT 0x0400 /* enable unmasked SSE exceptions */ + +/* + * Save the cr4 feature set we're using (ie + * Pentium 4MB enable and PPro Global page + * enable), so that any CPU's that boot up + * after us can get the correct flags. 
+ */ +extern unsigned long x86_cr4; + +static inline void set_in_cr4 (unsigned long mask) +{ + x86_cr4 |= mask; + __asm__("movl %%cr4,%%eax\n\t" + "orl %0,%%eax\n\t" + "movl %%eax,%%cr4\n" + : : "irg" (mask) + :"ax"); +} + +static inline void clear_in_cr4 (unsigned long mask) +{ + x86_cr4 &= ~mask; + __asm__("movl %%cr4,%%eax\n\t" + "andl %0,%%eax\n\t" + "movl %%eax,%%cr4\n" + : : "irg" (~mask) + :"ax"); +} + +/* * Cyrix CPU configuration register indexes */ #define CX86_CCR0 0xc0 @@ -211,39 +271,6 @@ } while (0) /* - * * Intel CPU features in CR4 - * */ -#define X86_CR4_VME 0x0001 /* enable vm86 extensions */ -#define X86_CR4_PVI 0x0002 /* virtual interrupts flag enable */ -#define X86_CR4_TSD 0x0004 /* disable time stamp at ipl 3 */ -#define X86_CR4_DE 0x0008 /* enable debugging extensions */ -#define X86_CR4_PSE 0x0010 /* enable page size extensions */ -#define X86_CR4_PAE 0x0020 /* enable physical address extensions */ -#define X86_CR4_MCE 0x0040 /* Machine check enable */ -#define X86_CR4_PGE 0x0080 /* enable global pages */ -#define X86_CR4_PCE 0x0100 /* enable performance counters at ipl 3 */ -#define X86_CR4_OSFXSR 0x0200 /* enable fast FPU save and restore */ -#define X86_CR4_OSXMMEXCPT 0x0400 /* enable unmasked SSE exceptions */ - -/* - * Save the cr4 feature set we're using (ie - * Pentium 4MB enable and PPro Global page - * enable), so that any CPU's that boot up - * after us can get the correct flags. - */ -extern unsigned long mmu_cr4_features; - -static inline void set_in_cr4 (unsigned long mask) -{ - mmu_cr4_features |= mask; - __asm__("movl %%cr4,%%eax\n\t" - "orl %0,%%eax\n\t" - "movl %%eax,%%cr4\n" - : : "irg" (mask) - :"ax"); -} - -/* * Bus types (default is ISA, but people can check others with these..) */ extern int EISA_bus; @@ -271,7 +298,7 @@ */ #define IO_BITMAP_SIZE 32 -struct i387_hard_struct { +struct i387_fsave_struct { long cwd; long swd; long twd; @@ -283,22 +310,42 @@ long status; /* software status information */ }; -struct i387_soft_struct { - long cwd; - long swd; - long twd; +/* + * has to be 128-bit aligned + */ +struct i387_fxsave_struct { + unsigned short cwd; + unsigned short swd; + unsigned short twd; + unsigned short fop; long fip; long fcs; long foo; long fos; - long st_space[20]; /* 8*10 bytes for each FP-reg = 80 bytes */ - unsigned char ftop, changed, lookahead, no_update, rm, alimit; - struct info *info; - unsigned long entry_eip; + long mxcsr; + long reserved; + long st_space[32]; /* 8*16 bytes for each FP-reg = 128 bytes */ + long xmm_space[32]; /* 8*16 bytes for each XMM-reg = 128 bytes */ + long padding[56]; +} __attribute__ ((aligned (16))); + +struct i387_soft_struct { + long cwd; + long swd; + long twd; + long fip; + long fcs; + long foo; + long fos; + long st_space[20]; /* 8*10 bytes for each FP-reg = 80 bytes */ + unsigned char ftop, changed, lookahead, no_update, rm, alimit; + struct info *info; + unsigned long entry_eip; }; union i387_union { - struct i387_hard_struct hard; + struct i387_fsave_struct fsave; + struct i387_fxsave_struct fxsave; struct i387_soft_struct soft; }; @@ -391,27 +438,6 @@ extern void forget_segments(void); /* - * FPU lazy state save handling.. 
- */ -#define save_fpu(tsk) do { \ - asm volatile("fnsave %0\n\tfwait":"=m" (tsk->tss.i387)); \ - tsk->flags &= ~PF_USEDFPU; \ - stts(); \ -} while (0) - -#define unlazy_fpu(tsk) do { \ - if (tsk->flags & PF_USEDFPU) \ - save_fpu(tsk); \ -} while (0) - -#define clear_fpu(tsk) do { \ - if (tsk->flags & PF_USEDFPU) { \ - tsk->flags &= ~PF_USEDFPU; \ - stts(); \ - } \ -} while (0) - -/* * Return saved PC of a blocked thread. */ extern inline unsigned long thread_saved_pc(struct thread_struct *t) diff -urN linux.orig/include/asm-i386/ptrace.h linux/include/asm-i386/ptrace.h --- linux.orig/include/asm-i386/ptrace.h Sun Mar 25 18:31:05 2001 +++ linux/include/asm-i386/ptrace.h Wed Aug 7 17:44:43 2002 @@ -46,6 +46,8 @@ #define PTRACE_SETREGS 13 #define PTRACE_GETFPREGS 14 #define PTRACE_SETFPREGS 15 +#define PTRACE_GETFPXREGS 18 +#define PTRACE_SETFPXREGS 19 #ifdef __KERNEL__ #define user_mode(regs) ((VM_MASK & (regs)->eflags) || (3 & (regs)->xcs)) diff -urN linux.orig/include/asm-i386/sigcontext.h linux/include/asm-i386/sigcontext.h --- linux.orig/include/asm-i386/sigcontext.h Sun Mar 25 18:31:05 2001 +++ linux/include/asm-i386/sigcontext.h Wed Aug 7 17:44:43 2002 @@ -8,24 +8,53 @@ * normal i387 hardware setup, the extra "status" * word is used to save the coprocessor status word * before entering the handler. + * + * Pentium III FXSR, SSE support + * Gareth Hughes , May 2000 + * + * The FPU state data structure has had to grow to accomodate the + * extended FPU state required by the Streaming SIMD Extensions. + * There is no documented standard to accomplish this at the moment. */ struct _fpreg { unsigned short significand[4]; unsigned short exponent; }; +struct _fpxreg { + unsigned short significand[4]; + unsigned short exponent; + unsigned short padding[3]; +}; + +struct _xmmreg { + unsigned long element[4]; +}; + struct _fpstate { - unsigned long cw, - sw, - tag, - ipoff, - cssel, - dataoff, - datasel; + /* Regular FPU environment */ + unsigned long cw; + unsigned long sw; + unsigned long tag; + unsigned long ipoff; + unsigned long cssel; + unsigned long dataoff; + unsigned long datasel; struct _fpreg _st[8]; - unsigned long status; + unsigned short status; + unsigned short magic; /* 0xffff = regular FPU data only */ + + /* FXSR FPU environment */ + unsigned long _fxsr_env[6]; /* FXSR FPU env is ignored */ + unsigned long mxcsr; + unsigned long reserved; + struct _fpxreg _fxsr_st[8]; /* FXSR FPU reg data is ignored */ + struct _xmmreg _xmm[8]; + unsigned long padding[56]; }; +#define X86_FXSR_MAGIC 0x0000 + struct sigcontext { unsigned short gs, __gsh; unsigned short fs, __fsh; diff -urN linux.orig/include/asm-i386/siginfo.h linux/include/asm-i386/siginfo.h --- linux.orig/include/asm-i386/siginfo.h Sun Mar 25 18:37:39 2001 +++ linux/include/asm-i386/siginfo.h Wed Aug 7 17:44:43 2002 @@ -77,6 +77,25 @@ #define si_band _sifields._sigpoll._band #define si_fd _sifields._sigpoll._fd +#ifdef __KERNEL__ +#define __SI_MASK 0xffff0000 +#define __SI_KILL (0 << 16) +#define __SI_TIMER (1 << 16) +#define __SI_POLL (2 << 16) +#define __SI_FAULT (3 << 16) +#define __SI_CHLD (4 << 16) +#define __SI_RT (5 << 16) +#define __SI_CODE(T,N) ((T) << 16 | ((N) & 0xffff)) +#else +#define __SI_KILL 0 +#define __SI_TIMER 0 +#define __SI_POLL 0 +#define __SI_FAULT 0 +#define __SI_CHLD 0 +#define __SI_RT 0 +#define __SI_CODE(T,N) (N) +#endif + /* * si_code values * Digital reserves positive values for kernel-generated signals. 
diff -urN linux.orig/include/asm-i386/user.h linux/include/asm-i386/user.h --- linux.orig/include/asm-i386/user.h Sun Mar 25 18:31:05 2001 +++ linux/include/asm-i386/user.h Wed Aug 7 17:44:43 2002 @@ -30,6 +30,18 @@ The minimum core file size is 3 pages, or 12288 bytes. */ +/* + * Pentium III FXSR, SSE support + * Gareth Hughes , May 2000 + * + * Provide support for the GDB 5.0+ PTRACE_{GET|SET}FPXREGS requests for + * interacting with the FXSR-format floating point environment. Floating + * point data can be accessed in the regular format in the usual manner, + * and both the standard and SIMD floating point data can be accessed via + * the new ptrace requests. In either case, changes to the FPU environment + * will be reflected in the task's state as expected. + */ + struct user_i387_struct { long cwd; long swd; @@ -41,6 +53,22 @@ long st_space[20]; /* 8*10 bytes for each FP-reg = 80 bytes */ }; +struct user_fxsr_struct { + unsigned short cwd; + unsigned short swd; + unsigned short twd; + unsigned short fop; + long fip; + long fcs; + long foo; + long fos; + long mxcsr; + long reserved; + long st_space[32]; /* 8*16 bytes for each FP-reg = 128 bytes */ + long xmm_space[32]; /* 8*16 bytes for each XMM-reg = 128 bytes */ + long padding[56]; +}; + /* * This is the old layout of "struct pt_regs", and * is still the layout used by user mode (the new diff -urN linux.orig/include/asm-sparc/asm_offsets.h linux/include/asm-sparc/asm_offsets.h --- linux.orig/include/asm-sparc/asm_offsets.h Fri Nov 2 17:39:08 2001 +++ linux/include/asm-sparc/asm_offsets.h Wed Aug 7 17:55:29 2002 @@ -26,179 +26,181 @@ #define ASIZ_task_priority 0x00000004 #define AOFF_task_avg_slice 0x00000024 #define ASIZ_task_avg_slice 0x00000004 -#define AOFF_task_has_cpu 0x00000028 +#define AOFF_task_counter_refresh 0x00000028 +#define ASIZ_task_counter_refresh 0x00000004 +#define AOFF_task_has_cpu 0x0000002c #define ASIZ_task_has_cpu 0x00000004 -#define AOFF_task_processor 0x0000002c +#define AOFF_task_processor 0x00000030 #define ASIZ_task_processor 0x00000004 -#define AOFF_task_last_processor 0x00000030 +#define AOFF_task_last_processor 0x00000034 #define ASIZ_task_last_processor 0x00000004 -#define AOFF_task_lock_depth 0x00000034 +#define AOFF_task_lock_depth 0x00000038 #define ASIZ_task_lock_depth 0x00000004 -#define AOFF_task_next_task 0x00000038 +#define AOFF_task_next_task 0x0000003c #define ASIZ_task_next_task 0x00000004 -#define AOFF_task_prev_task 0x0000003c +#define AOFF_task_prev_task 0x00000040 #define ASIZ_task_prev_task 0x00000004 -#define AOFF_task_next_run 0x00000040 +#define AOFF_task_next_run 0x00000044 #define ASIZ_task_next_run 0x00000004 -#define AOFF_task_prev_run 0x00000044 +#define AOFF_task_prev_run 0x00000048 #define ASIZ_task_prev_run 0x00000004 -#define AOFF_task_task_exclusive 0x00000048 +#define AOFF_task_task_exclusive 0x0000004c #define ASIZ_task_task_exclusive 0x00000004 -#define AOFF_task_binfmt 0x0000004c +#define AOFF_task_binfmt 0x00000050 #define ASIZ_task_binfmt 0x00000004 -#define AOFF_task_exit_code 0x00000050 +#define AOFF_task_exit_code 0x00000054 #define ASIZ_task_exit_code 0x00000004 -#define AOFF_task_exit_signal 0x00000054 +#define AOFF_task_exit_signal 0x00000058 #define ASIZ_task_exit_signal 0x00000004 -#define AOFF_task_pdeath_signal 0x00000058 +#define AOFF_task_pdeath_signal 0x0000005c #define ASIZ_task_pdeath_signal 0x00000004 -#define AOFF_task_personality 0x0000005c +#define AOFF_task_personality 0x00000060 #define ASIZ_task_personality 0x00000004 -#define AOFF_task_pid 
0x00000064 +#define AOFF_task_pid 0x00000068 #define ASIZ_task_pid 0x00000004 -#define AOFF_task_pgrp 0x00000068 +#define AOFF_task_pgrp 0x0000006c #define ASIZ_task_pgrp 0x00000004 -#define AOFF_task_tty_old_pgrp 0x0000006c +#define AOFF_task_tty_old_pgrp 0x00000070 #define ASIZ_task_tty_old_pgrp 0x00000004 -#define AOFF_task_session 0x00000070 +#define AOFF_task_session 0x00000074 #define ASIZ_task_session 0x00000004 -#define AOFF_task_leader 0x00000074 +#define AOFF_task_leader 0x00000078 #define ASIZ_task_leader 0x00000004 -#define AOFF_task_p_opptr 0x00000078 +#define AOFF_task_p_opptr 0x0000007c #define ASIZ_task_p_opptr 0x00000004 -#define AOFF_task_p_pptr 0x0000007c +#define AOFF_task_p_pptr 0x00000080 #define ASIZ_task_p_pptr 0x00000004 -#define AOFF_task_p_cptr 0x00000080 +#define AOFF_task_p_cptr 0x00000084 #define ASIZ_task_p_cptr 0x00000004 -#define AOFF_task_p_ysptr 0x00000084 +#define AOFF_task_p_ysptr 0x00000088 #define ASIZ_task_p_ysptr 0x00000004 -#define AOFF_task_p_osptr 0x00000088 +#define AOFF_task_p_osptr 0x0000008c #define ASIZ_task_p_osptr 0x00000004 -#define AOFF_task_pidhash_next 0x0000008c +#define AOFF_task_pidhash_next 0x00000090 #define ASIZ_task_pidhash_next 0x00000004 -#define AOFF_task_pidhash_pprev 0x00000090 +#define AOFF_task_pidhash_pprev 0x00000094 #define ASIZ_task_pidhash_pprev 0x00000004 -#define AOFF_task_tarray_ptr 0x00000094 +#define AOFF_task_tarray_ptr 0x00000098 #define ASIZ_task_tarray_ptr 0x00000004 -#define AOFF_task_wait_chldexit 0x00000098 +#define AOFF_task_wait_chldexit 0x0000009c #define ASIZ_task_wait_chldexit 0x00000004 -#define AOFF_task_vfork_sem 0x0000009c +#define AOFF_task_vfork_sem 0x000000a0 #define ASIZ_task_vfork_sem 0x00000004 -#define AOFF_task_policy 0x000000a0 +#define AOFF_task_policy 0x000000a4 #define ASIZ_task_policy 0x00000004 -#define AOFF_task_rt_priority 0x000000a4 +#define AOFF_task_rt_priority 0x000000a8 #define ASIZ_task_rt_priority 0x00000004 -#define AOFF_task_it_real_value 0x000000a8 +#define AOFF_task_it_real_value 0x000000ac #define ASIZ_task_it_real_value 0x00000004 -#define AOFF_task_it_prof_value 0x000000ac +#define AOFF_task_it_prof_value 0x000000b0 #define ASIZ_task_it_prof_value 0x00000004 -#define AOFF_task_it_virt_value 0x000000b0 +#define AOFF_task_it_virt_value 0x000000b4 #define ASIZ_task_it_virt_value 0x00000004 -#define AOFF_task_it_real_incr 0x000000b4 +#define AOFF_task_it_real_incr 0x000000b8 #define ASIZ_task_it_real_incr 0x00000004 -#define AOFF_task_it_prof_incr 0x000000b8 +#define AOFF_task_it_prof_incr 0x000000bc #define ASIZ_task_it_prof_incr 0x00000004 -#define AOFF_task_it_virt_incr 0x000000bc +#define AOFF_task_it_virt_incr 0x000000c0 #define ASIZ_task_it_virt_incr 0x00000004 -#define AOFF_task_real_timer 0x000000c0 +#define AOFF_task_real_timer 0x000000c4 #define ASIZ_task_real_timer 0x00000014 -#define AOFF_task_times 0x000000d4 +#define AOFF_task_times 0x000000d8 #define ASIZ_task_times 0x00000010 -#define AOFF_task_start_time 0x000000e4 +#define AOFF_task_start_time 0x000000e8 #define ASIZ_task_start_time 0x00000004 -#define AOFF_task_per_cpu_utime 0x000000e8 +#define AOFF_task_per_cpu_utime 0x000000ec #define ASIZ_task_per_cpu_utime 0x00000004 -#define AOFF_task_min_flt 0x000000f0 +#define AOFF_task_min_flt 0x000000f4 #define ASIZ_task_min_flt 0x00000004 -#define AOFF_task_maj_flt 0x000000f4 +#define AOFF_task_maj_flt 0x000000f8 #define ASIZ_task_maj_flt 0x00000004 -#define AOFF_task_nswap 0x000000f8 +#define AOFF_task_nswap 0x000000fc #define ASIZ_task_nswap 0x00000004 
-#define AOFF_task_cmin_flt 0x000000fc +#define AOFF_task_cmin_flt 0x00000100 #define ASIZ_task_cmin_flt 0x00000004 -#define AOFF_task_cmaj_flt 0x00000100 +#define AOFF_task_cmaj_flt 0x00000104 #define ASIZ_task_cmaj_flt 0x00000004 -#define AOFF_task_cnswap 0x00000104 +#define AOFF_task_cnswap 0x00000108 #define ASIZ_task_cnswap 0x00000004 -#define AOFF_task_uid 0x0000010a +#define AOFF_task_uid 0x0000010e #define ASIZ_task_uid 0x00000002 -#define AOFF_task_euid 0x0000010c +#define AOFF_task_euid 0x00000110 #define ASIZ_task_euid 0x00000002 -#define AOFF_task_suid 0x0000010e +#define AOFF_task_suid 0x00000112 #define ASIZ_task_suid 0x00000002 -#define AOFF_task_fsuid 0x00000110 +#define AOFF_task_fsuid 0x00000114 #define ASIZ_task_fsuid 0x00000002 -#define AOFF_task_gid 0x00000112 +#define AOFF_task_gid 0x00000116 #define ASIZ_task_gid 0x00000002 -#define AOFF_task_egid 0x00000114 +#define AOFF_task_egid 0x00000118 #define ASIZ_task_egid 0x00000002 -#define AOFF_task_sgid 0x00000116 +#define AOFF_task_sgid 0x0000011a #define ASIZ_task_sgid 0x00000002 -#define AOFF_task_fsgid 0x00000118 +#define AOFF_task_fsgid 0x0000011c #define ASIZ_task_fsgid 0x00000002 -#define AOFF_task_ngroups 0x0000011c +#define AOFF_task_ngroups 0x00000120 #define ASIZ_task_ngroups 0x00000004 -#define AOFF_task_groups 0x00000120 +#define AOFF_task_groups 0x00000124 #define ASIZ_task_groups 0x00000040 -#define AOFF_task_cap_effective 0x00000160 +#define AOFF_task_cap_effective 0x00000164 #define ASIZ_task_cap_effective 0x00000004 -#define AOFF_task_cap_inheritable 0x00000164 +#define AOFF_task_cap_inheritable 0x00000168 #define ASIZ_task_cap_inheritable 0x00000004 -#define AOFF_task_cap_permitted 0x00000168 +#define AOFF_task_cap_permitted 0x0000016c #define ASIZ_task_cap_permitted 0x00000004 -#define AOFF_task_user 0x00000170 +#define AOFF_task_user 0x00000174 #define ASIZ_task_user 0x00000004 -#define AOFF_task_rlim 0x00000174 +#define AOFF_task_rlim 0x00000178 #define ASIZ_task_rlim 0x00000050 -#define AOFF_task_used_math 0x000001c4 +#define AOFF_task_used_math 0x000001c8 #define ASIZ_task_used_math 0x00000002 -#define AOFF_task_comm 0x000001c6 +#define AOFF_task_comm 0x000001ca #define ASIZ_task_comm 0x00000010 -#define AOFF_task_link_count 0x000001d8 +#define AOFF_task_link_count 0x000001dc #define ASIZ_task_link_count 0x00000004 -#define AOFF_task_tty 0x000001dc +#define AOFF_task_tty 0x000001e0 #define ASIZ_task_tty 0x00000004 -#define AOFF_task_semundo 0x000001e0 +#define AOFF_task_semundo 0x000001e4 #define ASIZ_task_semundo 0x00000004 -#define AOFF_task_semsleeping 0x000001e4 +#define AOFF_task_semsleeping 0x000001e8 #define ASIZ_task_semsleeping 0x00000004 -#define AOFF_task_tss 0x000001e8 +#define AOFF_task_tss 0x000001f0 #define ASIZ_task_tss 0x00000388 -#define AOFF_task_fs 0x00000570 +#define AOFF_task_fs 0x00000578 #define ASIZ_task_fs 0x00000004 -#define AOFF_task_files 0x00000574 +#define AOFF_task_files 0x0000057c #define ASIZ_task_files 0x00000004 -#define AOFF_task_mm 0x00000578 +#define AOFF_task_mm 0x00000580 #define ASIZ_task_mm 0x00000004 -#define AOFF_task_local_pages 0x0000057c +#define AOFF_task_local_pages 0x00000584 #define ASIZ_task_local_pages 0x00000008 -#define AOFF_task_allocation_order 0x00000584 +#define AOFF_task_allocation_order 0x0000058c #define ASIZ_task_allocation_order 0x00000004 -#define AOFF_task_nr_local_pages 0x00000588 +#define AOFF_task_nr_local_pages 0x00000590 #define ASIZ_task_nr_local_pages 0x00000004 -#define AOFF_task_fs_locks 0x0000058c +#define 
AOFF_task_fs_locks 0x00000594 #define ASIZ_task_fs_locks 0x00000004 -#define AOFF_task_sigmask_lock 0x00000590 +#define AOFF_task_sigmask_lock 0x00000598 #define ASIZ_task_sigmask_lock 0x00000001 -#define AOFF_task_sig 0x00000594 +#define AOFF_task_sig 0x0000059c #define ASIZ_task_sig 0x00000004 -#define AOFF_task_signal 0x00000598 +#define AOFF_task_signal 0x000005a0 #define ASIZ_task_signal 0x00000008 -#define AOFF_task_blocked 0x000005a0 +#define AOFF_task_blocked 0x000005a8 #define ASIZ_task_blocked 0x00000008 -#define AOFF_task_sigqueue 0x000005a8 +#define AOFF_task_sigqueue 0x000005b0 #define ASIZ_task_sigqueue 0x00000004 -#define AOFF_task_sigqueue_tail 0x000005ac +#define AOFF_task_sigqueue_tail 0x000005b4 #define ASIZ_task_sigqueue_tail 0x00000004 -#define AOFF_task_sas_ss_sp 0x000005b0 +#define AOFF_task_sas_ss_sp 0x000005b8 #define ASIZ_task_sas_ss_sp 0x00000004 -#define AOFF_task_sas_ss_size 0x000005b4 +#define AOFF_task_sas_ss_size 0x000005bc #define ASIZ_task_sas_ss_size 0x00000004 -#define AOFF_task_parent_exec_id 0x000005b8 +#define AOFF_task_parent_exec_id 0x000005c0 #define ASIZ_task_parent_exec_id 0x00000004 -#define AOFF_task_self_exec_id 0x000005bc +#define AOFF_task_self_exec_id 0x000005c4 #define ASIZ_task_self_exec_id 0x00000004 -#define AOFF_task_oom_kill_try 0x000005c0 +#define AOFF_task_oom_kill_try 0x000005c8 #define ASIZ_task_oom_kill_try 0x00000004 #define AOFF_mm_mmap 0x00000000 #define ASIZ_mm_mmap 0x00000004 @@ -319,179 +321,181 @@ #define ASIZ_task_priority 0x00000004 #define AOFF_task_avg_slice 0x00000024 #define ASIZ_task_avg_slice 0x00000004 -#define AOFF_task_has_cpu 0x00000028 +#define AOFF_task_counter_refresh 0x00000028 +#define ASIZ_task_counter_refresh 0x00000004 +#define AOFF_task_has_cpu 0x0000002c #define ASIZ_task_has_cpu 0x00000004 -#define AOFF_task_processor 0x0000002c +#define AOFF_task_processor 0x00000030 #define ASIZ_task_processor 0x00000004 -#define AOFF_task_last_processor 0x00000030 +#define AOFF_task_last_processor 0x00000034 #define ASIZ_task_last_processor 0x00000004 -#define AOFF_task_lock_depth 0x00000034 +#define AOFF_task_lock_depth 0x00000038 #define ASIZ_task_lock_depth 0x00000004 -#define AOFF_task_next_task 0x00000038 +#define AOFF_task_next_task 0x0000003c #define ASIZ_task_next_task 0x00000004 -#define AOFF_task_prev_task 0x0000003c +#define AOFF_task_prev_task 0x00000040 #define ASIZ_task_prev_task 0x00000004 -#define AOFF_task_next_run 0x00000040 +#define AOFF_task_next_run 0x00000044 #define ASIZ_task_next_run 0x00000004 -#define AOFF_task_prev_run 0x00000044 +#define AOFF_task_prev_run 0x00000048 #define ASIZ_task_prev_run 0x00000004 -#define AOFF_task_task_exclusive 0x00000048 +#define AOFF_task_task_exclusive 0x0000004c #define ASIZ_task_task_exclusive 0x00000004 -#define AOFF_task_binfmt 0x0000004c +#define AOFF_task_binfmt 0x00000050 #define ASIZ_task_binfmt 0x00000004 -#define AOFF_task_exit_code 0x00000050 +#define AOFF_task_exit_code 0x00000054 #define ASIZ_task_exit_code 0x00000004 -#define AOFF_task_exit_signal 0x00000054 +#define AOFF_task_exit_signal 0x00000058 #define ASIZ_task_exit_signal 0x00000004 -#define AOFF_task_pdeath_signal 0x00000058 +#define AOFF_task_pdeath_signal 0x0000005c #define ASIZ_task_pdeath_signal 0x00000004 -#define AOFF_task_personality 0x0000005c +#define AOFF_task_personality 0x00000060 #define ASIZ_task_personality 0x00000004 -#define AOFF_task_pid 0x00000064 +#define AOFF_task_pid 0x00000068 #define ASIZ_task_pid 0x00000004 -#define AOFF_task_pgrp 0x00000068 +#define 
AOFF_task_pgrp 0x0000006c #define ASIZ_task_pgrp 0x00000004 -#define AOFF_task_tty_old_pgrp 0x0000006c +#define AOFF_task_tty_old_pgrp 0x00000070 #define ASIZ_task_tty_old_pgrp 0x00000004 -#define AOFF_task_session 0x00000070 +#define AOFF_task_session 0x00000074 #define ASIZ_task_session 0x00000004 -#define AOFF_task_leader 0x00000074 +#define AOFF_task_leader 0x00000078 #define ASIZ_task_leader 0x00000004 -#define AOFF_task_p_opptr 0x00000078 +#define AOFF_task_p_opptr 0x0000007c #define ASIZ_task_p_opptr 0x00000004 -#define AOFF_task_p_pptr 0x0000007c +#define AOFF_task_p_pptr 0x00000080 #define ASIZ_task_p_pptr 0x00000004 -#define AOFF_task_p_cptr 0x00000080 +#define AOFF_task_p_cptr 0x00000084 #define ASIZ_task_p_cptr 0x00000004 -#define AOFF_task_p_ysptr 0x00000084 +#define AOFF_task_p_ysptr 0x00000088 #define ASIZ_task_p_ysptr 0x00000004 -#define AOFF_task_p_osptr 0x00000088 +#define AOFF_task_p_osptr 0x0000008c #define ASIZ_task_p_osptr 0x00000004 -#define AOFF_task_pidhash_next 0x0000008c +#define AOFF_task_pidhash_next 0x00000090 #define ASIZ_task_pidhash_next 0x00000004 -#define AOFF_task_pidhash_pprev 0x00000090 +#define AOFF_task_pidhash_pprev 0x00000094 #define ASIZ_task_pidhash_pprev 0x00000004 -#define AOFF_task_tarray_ptr 0x00000094 +#define AOFF_task_tarray_ptr 0x00000098 #define ASIZ_task_tarray_ptr 0x00000004 -#define AOFF_task_wait_chldexit 0x00000098 +#define AOFF_task_wait_chldexit 0x0000009c #define ASIZ_task_wait_chldexit 0x00000004 -#define AOFF_task_vfork_sem 0x0000009c +#define AOFF_task_vfork_sem 0x000000a0 #define ASIZ_task_vfork_sem 0x00000004 -#define AOFF_task_policy 0x000000a0 +#define AOFF_task_policy 0x000000a4 #define ASIZ_task_policy 0x00000004 -#define AOFF_task_rt_priority 0x000000a4 +#define AOFF_task_rt_priority 0x000000a8 #define ASIZ_task_rt_priority 0x00000004 -#define AOFF_task_it_real_value 0x000000a8 +#define AOFF_task_it_real_value 0x000000ac #define ASIZ_task_it_real_value 0x00000004 -#define AOFF_task_it_prof_value 0x000000ac +#define AOFF_task_it_prof_value 0x000000b0 #define ASIZ_task_it_prof_value 0x00000004 -#define AOFF_task_it_virt_value 0x000000b0 +#define AOFF_task_it_virt_value 0x000000b4 #define ASIZ_task_it_virt_value 0x00000004 -#define AOFF_task_it_real_incr 0x000000b4 +#define AOFF_task_it_real_incr 0x000000b8 #define ASIZ_task_it_real_incr 0x00000004 -#define AOFF_task_it_prof_incr 0x000000b8 +#define AOFF_task_it_prof_incr 0x000000bc #define ASIZ_task_it_prof_incr 0x00000004 -#define AOFF_task_it_virt_incr 0x000000bc +#define AOFF_task_it_virt_incr 0x000000c0 #define ASIZ_task_it_virt_incr 0x00000004 -#define AOFF_task_real_timer 0x000000c0 +#define AOFF_task_real_timer 0x000000c4 #define ASIZ_task_real_timer 0x00000014 -#define AOFF_task_times 0x000000d4 +#define AOFF_task_times 0x000000d8 #define ASIZ_task_times 0x00000010 -#define AOFF_task_start_time 0x000000e4 +#define AOFF_task_start_time 0x000000e8 #define ASIZ_task_start_time 0x00000004 -#define AOFF_task_per_cpu_utime 0x000000e8 +#define AOFF_task_per_cpu_utime 0x000000ec #define ASIZ_task_per_cpu_utime 0x00000080 -#define AOFF_task_min_flt 0x000001e8 +#define AOFF_task_min_flt 0x000001ec #define ASIZ_task_min_flt 0x00000004 -#define AOFF_task_maj_flt 0x000001ec +#define AOFF_task_maj_flt 0x000001f0 #define ASIZ_task_maj_flt 0x00000004 -#define AOFF_task_nswap 0x000001f0 +#define AOFF_task_nswap 0x000001f4 #define ASIZ_task_nswap 0x00000004 -#define AOFF_task_cmin_flt 0x000001f4 +#define AOFF_task_cmin_flt 0x000001f8 #define ASIZ_task_cmin_flt 0x00000004 -#define 
AOFF_task_cmaj_flt 0x000001f8 +#define AOFF_task_cmaj_flt 0x000001fc #define ASIZ_task_cmaj_flt 0x00000004 -#define AOFF_task_cnswap 0x000001fc +#define AOFF_task_cnswap 0x00000200 #define ASIZ_task_cnswap 0x00000004 -#define AOFF_task_uid 0x00000202 +#define AOFF_task_uid 0x00000206 #define ASIZ_task_uid 0x00000002 -#define AOFF_task_euid 0x00000204 +#define AOFF_task_euid 0x00000208 #define ASIZ_task_euid 0x00000002 -#define AOFF_task_suid 0x00000206 +#define AOFF_task_suid 0x0000020a #define ASIZ_task_suid 0x00000002 -#define AOFF_task_fsuid 0x00000208 +#define AOFF_task_fsuid 0x0000020c #define ASIZ_task_fsuid 0x00000002 -#define AOFF_task_gid 0x0000020a +#define AOFF_task_gid 0x0000020e #define ASIZ_task_gid 0x00000002 -#define AOFF_task_egid 0x0000020c +#define AOFF_task_egid 0x00000210 #define ASIZ_task_egid 0x00000002 -#define AOFF_task_sgid 0x0000020e +#define AOFF_task_sgid 0x00000212 #define ASIZ_task_sgid 0x00000002 -#define AOFF_task_fsgid 0x00000210 +#define AOFF_task_fsgid 0x00000214 #define ASIZ_task_fsgid 0x00000002 -#define AOFF_task_ngroups 0x00000214 +#define AOFF_task_ngroups 0x00000218 #define ASIZ_task_ngroups 0x00000004 -#define AOFF_task_groups 0x00000218 +#define AOFF_task_groups 0x0000021c #define ASIZ_task_groups 0x00000040 -#define AOFF_task_cap_effective 0x00000258 +#define AOFF_task_cap_effective 0x0000025c #define ASIZ_task_cap_effective 0x00000004 -#define AOFF_task_cap_inheritable 0x0000025c +#define AOFF_task_cap_inheritable 0x00000260 #define ASIZ_task_cap_inheritable 0x00000004 -#define AOFF_task_cap_permitted 0x00000260 +#define AOFF_task_cap_permitted 0x00000264 #define ASIZ_task_cap_permitted 0x00000004 -#define AOFF_task_user 0x00000268 +#define AOFF_task_user 0x0000026c #define ASIZ_task_user 0x00000004 -#define AOFF_task_rlim 0x0000026c +#define AOFF_task_rlim 0x00000270 #define ASIZ_task_rlim 0x00000050 -#define AOFF_task_used_math 0x000002bc +#define AOFF_task_used_math 0x000002c0 #define ASIZ_task_used_math 0x00000002 -#define AOFF_task_comm 0x000002be +#define AOFF_task_comm 0x000002c2 #define ASIZ_task_comm 0x00000010 -#define AOFF_task_link_count 0x000002d0 +#define AOFF_task_link_count 0x000002d4 #define ASIZ_task_link_count 0x00000004 -#define AOFF_task_tty 0x000002d4 +#define AOFF_task_tty 0x000002d8 #define ASIZ_task_tty 0x00000004 -#define AOFF_task_semundo 0x000002d8 +#define AOFF_task_semundo 0x000002dc #define ASIZ_task_semundo 0x00000004 -#define AOFF_task_semsleeping 0x000002dc +#define AOFF_task_semsleeping 0x000002e0 #define ASIZ_task_semsleeping 0x00000004 -#define AOFF_task_tss 0x000002e0 +#define AOFF_task_tss 0x000002e8 #define ASIZ_task_tss 0x00000388 -#define AOFF_task_fs 0x00000668 +#define AOFF_task_fs 0x00000670 #define ASIZ_task_fs 0x00000004 -#define AOFF_task_files 0x0000066c +#define AOFF_task_files 0x00000674 #define ASIZ_task_files 0x00000004 -#define AOFF_task_mm 0x00000670 +#define AOFF_task_mm 0x00000678 #define ASIZ_task_mm 0x00000004 -#define AOFF_task_local_pages 0x00000674 +#define AOFF_task_local_pages 0x0000067c #define ASIZ_task_local_pages 0x00000008 -#define AOFF_task_allocation_order 0x0000067c +#define AOFF_task_allocation_order 0x00000684 #define ASIZ_task_allocation_order 0x00000004 -#define AOFF_task_nr_local_pages 0x00000680 +#define AOFF_task_nr_local_pages 0x00000688 #define ASIZ_task_nr_local_pages 0x00000004 -#define AOFF_task_fs_locks 0x00000684 +#define AOFF_task_fs_locks 0x0000068c #define ASIZ_task_fs_locks 0x00000004 -#define AOFF_task_sigmask_lock 0x00000688 +#define 
AOFF_task_sigmask_lock 0x00000690 #define ASIZ_task_sigmask_lock 0x00000001 -#define AOFF_task_sig 0x0000068c +#define AOFF_task_sig 0x00000694 #define ASIZ_task_sig 0x00000004 -#define AOFF_task_signal 0x00000690 +#define AOFF_task_signal 0x00000698 #define ASIZ_task_signal 0x00000008 -#define AOFF_task_blocked 0x00000698 +#define AOFF_task_blocked 0x000006a0 #define ASIZ_task_blocked 0x00000008 -#define AOFF_task_sigqueue 0x000006a0 +#define AOFF_task_sigqueue 0x000006a8 #define ASIZ_task_sigqueue 0x00000004 -#define AOFF_task_sigqueue_tail 0x000006a4 +#define AOFF_task_sigqueue_tail 0x000006ac #define ASIZ_task_sigqueue_tail 0x00000004 -#define AOFF_task_sas_ss_sp 0x000006a8 +#define AOFF_task_sas_ss_sp 0x000006b0 #define ASIZ_task_sas_ss_sp 0x00000004 -#define AOFF_task_sas_ss_size 0x000006ac +#define AOFF_task_sas_ss_size 0x000006b4 #define ASIZ_task_sas_ss_size 0x00000004 -#define AOFF_task_parent_exec_id 0x000006b0 +#define AOFF_task_parent_exec_id 0x000006b8 #define ASIZ_task_parent_exec_id 0x00000004 -#define AOFF_task_self_exec_id 0x000006b4 +#define AOFF_task_self_exec_id 0x000006bc #define ASIZ_task_self_exec_id 0x00000004 -#define AOFF_task_oom_kill_try 0x000006b8 +#define AOFF_task_oom_kill_try 0x000006c0 #define ASIZ_task_oom_kill_try 0x00000004 #define AOFF_mm_mmap 0x00000000 #define ASIZ_mm_mmap 0x00000004 diff -urN linux.orig/include/asm-sparc/poll.h linux/include/asm-sparc/poll.h --- linux.orig/include/asm-sparc/poll.h Sun Mar 25 18:31:07 2001 +++ linux/include/asm-sparc/poll.h Wed Aug 7 17:45:23 2002 @@ -11,6 +11,7 @@ #define POLLWRNORM POLLOUT #define POLLRDBAND 128 #define POLLWRBAND 256 +#define POLLMSG 512 struct pollfd { int fd; diff -urN linux.orig/include/asm-sparc64/asm_offsets.h linux/include/asm-sparc64/asm_offsets.h --- linux.orig/include/asm-sparc64/asm_offsets.h Fri Nov 2 17:39:08 2001 +++ linux/include/asm-sparc64/asm_offsets.h Wed Aug 7 17:55:29 2002 @@ -26,181 +26,183 @@ #define ASIZ_task_priority 0x00000008 #define AOFF_task_avg_slice 0x00000048 #define ASIZ_task_avg_slice 0x00000008 -#define AOFF_task_has_cpu 0x00000050 +#define AOFF_task_counter_refresh 0x00000050 +#define ASIZ_task_counter_refresh 0x00000004 +#define AOFF_task_has_cpu 0x00000054 #define ASIZ_task_has_cpu 0x00000004 -#define AOFF_task_processor 0x00000054 +#define AOFF_task_processor 0x00000058 #define ASIZ_task_processor 0x00000004 -#define AOFF_task_last_processor 0x00000058 +#define AOFF_task_last_processor 0x0000005c #define ASIZ_task_last_processor 0x00000004 -#define AOFF_task_lock_depth 0x0000005c +#define AOFF_task_lock_depth 0x00000060 #define ASIZ_task_lock_depth 0x00000004 -#define AOFF_task_next_task 0x00000060 +#define AOFF_task_next_task 0x00000068 #define ASIZ_task_next_task 0x00000008 -#define AOFF_task_prev_task 0x00000068 +#define AOFF_task_prev_task 0x00000070 #define ASIZ_task_prev_task 0x00000008 -#define AOFF_task_next_run 0x00000070 +#define AOFF_task_next_run 0x00000078 #define ASIZ_task_next_run 0x00000008 -#define AOFF_task_prev_run 0x00000078 +#define AOFF_task_prev_run 0x00000080 #define ASIZ_task_prev_run 0x00000008 -#define AOFF_task_task_exclusive 0x00000080 +#define AOFF_task_task_exclusive 0x00000088 #define ASIZ_task_task_exclusive 0x00000004 -#define AOFF_task_binfmt 0x00000088 +#define AOFF_task_binfmt 0x00000090 #define ASIZ_task_binfmt 0x00000008 -#define AOFF_task_exit_code 0x00000090 +#define AOFF_task_exit_code 0x00000098 #define ASIZ_task_exit_code 0x00000004 -#define AOFF_task_exit_signal 0x00000094 +#define AOFF_task_exit_signal 0x0000009c 
#define ASIZ_task_exit_signal 0x00000004 -#define AOFF_task_pdeath_signal 0x00000098 +#define AOFF_task_pdeath_signal 0x000000a0 #define ASIZ_task_pdeath_signal 0x00000004 -#define AOFF_task_personality 0x000000a0 +#define AOFF_task_personality 0x000000a8 #define ASIZ_task_personality 0x00000008 -#define AOFF_task_pid 0x000000ac +#define AOFF_task_pid 0x000000b4 #define ASIZ_task_pid 0x00000004 -#define AOFF_task_pgrp 0x000000b0 +#define AOFF_task_pgrp 0x000000b8 #define ASIZ_task_pgrp 0x00000004 -#define AOFF_task_tty_old_pgrp 0x000000b4 +#define AOFF_task_tty_old_pgrp 0x000000bc #define ASIZ_task_tty_old_pgrp 0x00000004 -#define AOFF_task_session 0x000000b8 +#define AOFF_task_session 0x000000c0 #define ASIZ_task_session 0x00000004 -#define AOFF_task_leader 0x000000bc +#define AOFF_task_leader 0x000000c4 #define ASIZ_task_leader 0x00000004 -#define AOFF_task_p_opptr 0x000000c0 +#define AOFF_task_p_opptr 0x000000c8 #define ASIZ_task_p_opptr 0x00000008 -#define AOFF_task_p_pptr 0x000000c8 +#define AOFF_task_p_pptr 0x000000d0 #define ASIZ_task_p_pptr 0x00000008 -#define AOFF_task_p_cptr 0x000000d0 +#define AOFF_task_p_cptr 0x000000d8 #define ASIZ_task_p_cptr 0x00000008 -#define AOFF_task_p_ysptr 0x000000d8 +#define AOFF_task_p_ysptr 0x000000e0 #define ASIZ_task_p_ysptr 0x00000008 -#define AOFF_task_p_osptr 0x000000e0 +#define AOFF_task_p_osptr 0x000000e8 #define ASIZ_task_p_osptr 0x00000008 -#define AOFF_task_pidhash_next 0x000000e8 +#define AOFF_task_pidhash_next 0x000000f0 #define ASIZ_task_pidhash_next 0x00000008 -#define AOFF_task_pidhash_pprev 0x000000f0 +#define AOFF_task_pidhash_pprev 0x000000f8 #define ASIZ_task_pidhash_pprev 0x00000008 -#define AOFF_task_tarray_ptr 0x000000f8 +#define AOFF_task_tarray_ptr 0x00000100 #define ASIZ_task_tarray_ptr 0x00000008 -#define AOFF_task_wait_chldexit 0x00000100 +#define AOFF_task_wait_chldexit 0x00000108 #define ASIZ_task_wait_chldexit 0x00000008 -#define AOFF_task_vfork_sem 0x00000108 +#define AOFF_task_vfork_sem 0x00000110 #define ASIZ_task_vfork_sem 0x00000008 -#define AOFF_task_policy 0x00000110 +#define AOFF_task_policy 0x00000118 #define ASIZ_task_policy 0x00000008 -#define AOFF_task_rt_priority 0x00000118 +#define AOFF_task_rt_priority 0x00000120 #define ASIZ_task_rt_priority 0x00000008 -#define AOFF_task_it_real_value 0x00000120 +#define AOFF_task_it_real_value 0x00000128 #define ASIZ_task_it_real_value 0x00000008 -#define AOFF_task_it_prof_value 0x00000128 +#define AOFF_task_it_prof_value 0x00000130 #define ASIZ_task_it_prof_value 0x00000008 -#define AOFF_task_it_virt_value 0x00000130 +#define AOFF_task_it_virt_value 0x00000138 #define ASIZ_task_it_virt_value 0x00000008 -#define AOFF_task_it_real_incr 0x00000138 +#define AOFF_task_it_real_incr 0x00000140 #define ASIZ_task_it_real_incr 0x00000008 -#define AOFF_task_it_prof_incr 0x00000140 +#define AOFF_task_it_prof_incr 0x00000148 #define ASIZ_task_it_prof_incr 0x00000008 -#define AOFF_task_it_virt_incr 0x00000148 +#define AOFF_task_it_virt_incr 0x00000150 #define ASIZ_task_it_virt_incr 0x00000008 -#define AOFF_task_real_timer 0x00000150 +#define AOFF_task_real_timer 0x00000158 #define ASIZ_task_real_timer 0x00000028 -#define AOFF_task_times 0x00000178 +#define AOFF_task_times 0x00000180 #define ASIZ_task_times 0x00000020 -#define AOFF_task_start_time 0x00000198 +#define AOFF_task_start_time 0x000001a0 #define ASIZ_task_start_time 0x00000008 -#define AOFF_task_per_cpu_utime 0x000001a0 +#define AOFF_task_per_cpu_utime 0x000001a8 #define ASIZ_task_per_cpu_utime 0x00000008 -#define 
AOFF_task_min_flt 0x000001b0 +#define AOFF_task_min_flt 0x000001b8 #define ASIZ_task_min_flt 0x00000008 -#define AOFF_task_maj_flt 0x000001b8 +#define AOFF_task_maj_flt 0x000001c0 #define ASIZ_task_maj_flt 0x00000008 -#define AOFF_task_nswap 0x000001c0 +#define AOFF_task_nswap 0x000001c8 #define ASIZ_task_nswap 0x00000008 -#define AOFF_task_cmin_flt 0x000001c8 +#define AOFF_task_cmin_flt 0x000001d0 #define ASIZ_task_cmin_flt 0x00000008 -#define AOFF_task_cmaj_flt 0x000001d0 +#define AOFF_task_cmaj_flt 0x000001d8 #define ASIZ_task_cmaj_flt 0x00000008 -#define AOFF_task_cnswap 0x000001d8 +#define AOFF_task_cnswap 0x000001e0 #define ASIZ_task_cnswap 0x00000008 -#define AOFF_task_uid 0x000001e4 +#define AOFF_task_uid 0x000001ec #define ASIZ_task_uid 0x00000004 -#define AOFF_task_euid 0x000001e8 +#define AOFF_task_euid 0x000001f0 #define ASIZ_task_euid 0x00000004 -#define AOFF_task_suid 0x000001ec +#define AOFF_task_suid 0x000001f4 #define ASIZ_task_suid 0x00000004 -#define AOFF_task_fsuid 0x000001f0 +#define AOFF_task_fsuid 0x000001f8 #define ASIZ_task_fsuid 0x00000004 -#define AOFF_task_gid 0x000001f4 +#define AOFF_task_gid 0x000001fc #define ASIZ_task_gid 0x00000004 -#define AOFF_task_egid 0x000001f8 +#define AOFF_task_egid 0x00000200 #define ASIZ_task_egid 0x00000004 -#define AOFF_task_sgid 0x000001fc +#define AOFF_task_sgid 0x00000204 #define ASIZ_task_sgid 0x00000004 -#define AOFF_task_fsgid 0x00000200 +#define AOFF_task_fsgid 0x00000208 #define ASIZ_task_fsgid 0x00000004 -#define AOFF_task_ngroups 0x00000204 +#define AOFF_task_ngroups 0x0000020c #define ASIZ_task_ngroups 0x00000004 -#define AOFF_task_groups 0x00000208 +#define AOFF_task_groups 0x00000210 #define ASIZ_task_groups 0x00000080 -#define AOFF_task_cap_effective 0x00000288 +#define AOFF_task_cap_effective 0x00000290 #define ASIZ_task_cap_effective 0x00000004 -#define AOFF_task_cap_inheritable 0x0000028c +#define AOFF_task_cap_inheritable 0x00000294 #define ASIZ_task_cap_inheritable 0x00000004 -#define AOFF_task_cap_permitted 0x00000290 +#define AOFF_task_cap_permitted 0x00000298 #define ASIZ_task_cap_permitted 0x00000004 -#define AOFF_task_user 0x00000298 +#define AOFF_task_user 0x000002a0 #define ASIZ_task_user 0x00000008 -#define AOFF_task_rlim 0x000002a0 +#define AOFF_task_rlim 0x000002a8 #define ASIZ_task_rlim 0x000000a0 -#define AOFF_task_used_math 0x00000340 +#define AOFF_task_used_math 0x00000348 #define ASIZ_task_used_math 0x00000002 -#define AOFF_task_comm 0x00000342 +#define AOFF_task_comm 0x0000034a #define ASIZ_task_comm 0x00000010 -#define AOFF_task_link_count 0x00000354 +#define AOFF_task_link_count 0x0000035c #define ASIZ_task_link_count 0x00000004 -#define AOFF_task_tty 0x00000358 +#define AOFF_task_tty 0x00000360 #define ASIZ_task_tty 0x00000008 -#define AOFF_task_semundo 0x00000360 +#define AOFF_task_semundo 0x00000368 #define ASIZ_task_semundo 0x00000008 -#define AOFF_task_semsleeping 0x00000368 +#define AOFF_task_semsleeping 0x00000370 #define ASIZ_task_semsleeping 0x00000008 -#define AOFF_task_tss 0x00000370 +#define AOFF_task_tss 0x00000380 #define ASIZ_task_tss 0x00000470 -#define AOFF_task_fs 0x000007e0 +#define AOFF_task_fs 0x000007f0 #define ASIZ_task_fs 0x00000008 -#define AOFF_task_files 0x000007e8 +#define AOFF_task_files 0x000007f8 #define ASIZ_task_files 0x00000008 -#define AOFF_task_mm 0x000007f0 +#define AOFF_task_mm 0x00000800 #define ASIZ_task_mm 0x00000008 -#define AOFF_task_local_pages 0x000007f8 +#define AOFF_task_local_pages 0x00000808 #define ASIZ_task_local_pages 0x00000010 -#define 
AOFF_task_allocation_order 0x00000808 +#define AOFF_task_allocation_order 0x00000818 #define ASIZ_task_allocation_order 0x00000004 -#define AOFF_task_nr_local_pages 0x0000080c +#define AOFF_task_nr_local_pages 0x0000081c #define ASIZ_task_nr_local_pages 0x00000004 -#define AOFF_task_fs_locks 0x00000810 +#define AOFF_task_fs_locks 0x00000820 #define ASIZ_task_fs_locks 0x00000004 -#define AOFF_task_sigmask_lock 0x00000814 +#define AOFF_task_sigmask_lock 0x00000824 #define ASIZ_task_sigmask_lock 0x00000001 -#define AOFF_task_sig 0x00000818 +#define AOFF_task_sig 0x00000828 #define ASIZ_task_sig 0x00000008 -#define AOFF_task_signal 0x00000820 +#define AOFF_task_signal 0x00000830 #define ASIZ_task_signal 0x00000008 -#define AOFF_task_blocked 0x00000828 +#define AOFF_task_blocked 0x00000838 #define ASIZ_task_blocked 0x00000008 -#define AOFF_task_sigqueue 0x00000830 +#define AOFF_task_sigqueue 0x00000840 #define ASIZ_task_sigqueue 0x00000008 -#define AOFF_task_sigqueue_tail 0x00000838 +#define AOFF_task_sigqueue_tail 0x00000848 #define ASIZ_task_sigqueue_tail 0x00000008 -#define AOFF_task_sas_ss_sp 0x00000840 +#define AOFF_task_sas_ss_sp 0x00000850 #define ASIZ_task_sas_ss_sp 0x00000008 -#define AOFF_task_sas_ss_size 0x00000848 +#define AOFF_task_sas_ss_size 0x00000858 #define ASIZ_task_sas_ss_size 0x00000008 -#define AOFF_task_parent_exec_id 0x00000850 +#define AOFF_task_parent_exec_id 0x00000860 #define ASIZ_task_parent_exec_id 0x00000004 -#define AOFF_task_self_exec_id 0x00000854 +#define AOFF_task_self_exec_id 0x00000864 #define ASIZ_task_self_exec_id 0x00000004 -#define AOFF_task_oom_kill_try 0x00000858 +#define AOFF_task_oom_kill_try 0x00000868 #define ASIZ_task_oom_kill_try 0x00000004 -#define ASIZ_task 0x00000860 +#define ASIZ_task 0x00000870 #define AOFF_mm_mmap 0x00000000 #define ASIZ_mm_mmap 0x00000008 #define AOFF_mm_mmap_avl 0x00000008 @@ -330,181 +332,183 @@ #define ASIZ_task_priority 0x00000008 #define AOFF_task_avg_slice 0x00000048 #define ASIZ_task_avg_slice 0x00000008 -#define AOFF_task_has_cpu 0x00000050 +#define AOFF_task_counter_refresh 0x00000050 +#define ASIZ_task_counter_refresh 0x00000004 +#define AOFF_task_has_cpu 0x00000054 #define ASIZ_task_has_cpu 0x00000004 -#define AOFF_task_processor 0x00000054 +#define AOFF_task_processor 0x00000058 #define ASIZ_task_processor 0x00000004 -#define AOFF_task_last_processor 0x00000058 +#define AOFF_task_last_processor 0x0000005c #define ASIZ_task_last_processor 0x00000004 -#define AOFF_task_lock_depth 0x0000005c +#define AOFF_task_lock_depth 0x00000060 #define ASIZ_task_lock_depth 0x00000004 -#define AOFF_task_next_task 0x00000060 +#define AOFF_task_next_task 0x00000068 #define ASIZ_task_next_task 0x00000008 -#define AOFF_task_prev_task 0x00000068 +#define AOFF_task_prev_task 0x00000070 #define ASIZ_task_prev_task 0x00000008 -#define AOFF_task_next_run 0x00000070 +#define AOFF_task_next_run 0x00000078 #define ASIZ_task_next_run 0x00000008 -#define AOFF_task_prev_run 0x00000078 +#define AOFF_task_prev_run 0x00000080 #define ASIZ_task_prev_run 0x00000008 -#define AOFF_task_task_exclusive 0x00000080 +#define AOFF_task_task_exclusive 0x00000088 #define ASIZ_task_task_exclusive 0x00000004 -#define AOFF_task_binfmt 0x00000088 +#define AOFF_task_binfmt 0x00000090 #define ASIZ_task_binfmt 0x00000008 -#define AOFF_task_exit_code 0x00000090 +#define AOFF_task_exit_code 0x00000098 #define ASIZ_task_exit_code 0x00000004 -#define AOFF_task_exit_signal 0x00000094 +#define AOFF_task_exit_signal 0x0000009c #define ASIZ_task_exit_signal 0x00000004 
-#define AOFF_task_pdeath_signal 0x00000098 +#define AOFF_task_pdeath_signal 0x000000a0 #define ASIZ_task_pdeath_signal 0x00000004 -#define AOFF_task_personality 0x000000a0 +#define AOFF_task_personality 0x000000a8 #define ASIZ_task_personality 0x00000008 -#define AOFF_task_pid 0x000000ac +#define AOFF_task_pid 0x000000b4 #define ASIZ_task_pid 0x00000004 -#define AOFF_task_pgrp 0x000000b0 +#define AOFF_task_pgrp 0x000000b8 #define ASIZ_task_pgrp 0x00000004 -#define AOFF_task_tty_old_pgrp 0x000000b4 +#define AOFF_task_tty_old_pgrp 0x000000bc #define ASIZ_task_tty_old_pgrp 0x00000004 -#define AOFF_task_session 0x000000b8 +#define AOFF_task_session 0x000000c0 #define ASIZ_task_session 0x00000004 -#define AOFF_task_leader 0x000000bc +#define AOFF_task_leader 0x000000c4 #define ASIZ_task_leader 0x00000004 -#define AOFF_task_p_opptr 0x000000c0 +#define AOFF_task_p_opptr 0x000000c8 #define ASIZ_task_p_opptr 0x00000008 -#define AOFF_task_p_pptr 0x000000c8 +#define AOFF_task_p_pptr 0x000000d0 #define ASIZ_task_p_pptr 0x00000008 -#define AOFF_task_p_cptr 0x000000d0 +#define AOFF_task_p_cptr 0x000000d8 #define ASIZ_task_p_cptr 0x00000008 -#define AOFF_task_p_ysptr 0x000000d8 +#define AOFF_task_p_ysptr 0x000000e0 #define ASIZ_task_p_ysptr 0x00000008 -#define AOFF_task_p_osptr 0x000000e0 +#define AOFF_task_p_osptr 0x000000e8 #define ASIZ_task_p_osptr 0x00000008 -#define AOFF_task_pidhash_next 0x000000e8 +#define AOFF_task_pidhash_next 0x000000f0 #define ASIZ_task_pidhash_next 0x00000008 -#define AOFF_task_pidhash_pprev 0x000000f0 +#define AOFF_task_pidhash_pprev 0x000000f8 #define ASIZ_task_pidhash_pprev 0x00000008 -#define AOFF_task_tarray_ptr 0x000000f8 +#define AOFF_task_tarray_ptr 0x00000100 #define ASIZ_task_tarray_ptr 0x00000008 -#define AOFF_task_wait_chldexit 0x00000100 +#define AOFF_task_wait_chldexit 0x00000108 #define ASIZ_task_wait_chldexit 0x00000008 -#define AOFF_task_vfork_sem 0x00000108 +#define AOFF_task_vfork_sem 0x00000110 #define ASIZ_task_vfork_sem 0x00000008 -#define AOFF_task_policy 0x00000110 +#define AOFF_task_policy 0x00000118 #define ASIZ_task_policy 0x00000008 -#define AOFF_task_rt_priority 0x00000118 +#define AOFF_task_rt_priority 0x00000120 #define ASIZ_task_rt_priority 0x00000008 -#define AOFF_task_it_real_value 0x00000120 +#define AOFF_task_it_real_value 0x00000128 #define ASIZ_task_it_real_value 0x00000008 -#define AOFF_task_it_prof_value 0x00000128 +#define AOFF_task_it_prof_value 0x00000130 #define ASIZ_task_it_prof_value 0x00000008 -#define AOFF_task_it_virt_value 0x00000130 +#define AOFF_task_it_virt_value 0x00000138 #define ASIZ_task_it_virt_value 0x00000008 -#define AOFF_task_it_real_incr 0x00000138 +#define AOFF_task_it_real_incr 0x00000140 #define ASIZ_task_it_real_incr 0x00000008 -#define AOFF_task_it_prof_incr 0x00000140 +#define AOFF_task_it_prof_incr 0x00000148 #define ASIZ_task_it_prof_incr 0x00000008 -#define AOFF_task_it_virt_incr 0x00000148 +#define AOFF_task_it_virt_incr 0x00000150 #define ASIZ_task_it_virt_incr 0x00000008 -#define AOFF_task_real_timer 0x00000150 +#define AOFF_task_real_timer 0x00000158 #define ASIZ_task_real_timer 0x00000028 -#define AOFF_task_times 0x00000178 +#define AOFF_task_times 0x00000180 #define ASIZ_task_times 0x00000020 -#define AOFF_task_start_time 0x00000198 +#define AOFF_task_start_time 0x000001a0 #define ASIZ_task_start_time 0x00000008 -#define AOFF_task_per_cpu_utime 0x000001a0 +#define AOFF_task_per_cpu_utime 0x000001a8 #define ASIZ_task_per_cpu_utime 0x00000100 -#define AOFF_task_min_flt 0x000003a0 +#define 
AOFF_task_min_flt 0x000003a8 #define ASIZ_task_min_flt 0x00000008 -#define AOFF_task_maj_flt 0x000003a8 +#define AOFF_task_maj_flt 0x000003b0 #define ASIZ_task_maj_flt 0x00000008 -#define AOFF_task_nswap 0x000003b0 +#define AOFF_task_nswap 0x000003b8 #define ASIZ_task_nswap 0x00000008 -#define AOFF_task_cmin_flt 0x000003b8 +#define AOFF_task_cmin_flt 0x000003c0 #define ASIZ_task_cmin_flt 0x00000008 -#define AOFF_task_cmaj_flt 0x000003c0 +#define AOFF_task_cmaj_flt 0x000003c8 #define ASIZ_task_cmaj_flt 0x00000008 -#define AOFF_task_cnswap 0x000003c8 +#define AOFF_task_cnswap 0x000003d0 #define ASIZ_task_cnswap 0x00000008 -#define AOFF_task_uid 0x000003d4 +#define AOFF_task_uid 0x000003dc #define ASIZ_task_uid 0x00000004 -#define AOFF_task_euid 0x000003d8 +#define AOFF_task_euid 0x000003e0 #define ASIZ_task_euid 0x00000004 -#define AOFF_task_suid 0x000003dc +#define AOFF_task_suid 0x000003e4 #define ASIZ_task_suid 0x00000004 -#define AOFF_task_fsuid 0x000003e0 +#define AOFF_task_fsuid 0x000003e8 #define ASIZ_task_fsuid 0x00000004 -#define AOFF_task_gid 0x000003e4 +#define AOFF_task_gid 0x000003ec #define ASIZ_task_gid 0x00000004 -#define AOFF_task_egid 0x000003e8 +#define AOFF_task_egid 0x000003f0 #define ASIZ_task_egid 0x00000004 -#define AOFF_task_sgid 0x000003ec +#define AOFF_task_sgid 0x000003f4 #define ASIZ_task_sgid 0x00000004 -#define AOFF_task_fsgid 0x000003f0 +#define AOFF_task_fsgid 0x000003f8 #define ASIZ_task_fsgid 0x00000004 -#define AOFF_task_ngroups 0x000003f4 +#define AOFF_task_ngroups 0x000003fc #define ASIZ_task_ngroups 0x00000004 -#define AOFF_task_groups 0x000003f8 +#define AOFF_task_groups 0x00000400 #define ASIZ_task_groups 0x00000080 -#define AOFF_task_cap_effective 0x00000478 +#define AOFF_task_cap_effective 0x00000480 #define ASIZ_task_cap_effective 0x00000004 -#define AOFF_task_cap_inheritable 0x0000047c +#define AOFF_task_cap_inheritable 0x00000484 #define ASIZ_task_cap_inheritable 0x00000004 -#define AOFF_task_cap_permitted 0x00000480 +#define AOFF_task_cap_permitted 0x00000488 #define ASIZ_task_cap_permitted 0x00000004 -#define AOFF_task_user 0x00000488 +#define AOFF_task_user 0x00000490 #define ASIZ_task_user 0x00000008 -#define AOFF_task_rlim 0x00000490 +#define AOFF_task_rlim 0x00000498 #define ASIZ_task_rlim 0x000000a0 -#define AOFF_task_used_math 0x00000530 +#define AOFF_task_used_math 0x00000538 #define ASIZ_task_used_math 0x00000002 -#define AOFF_task_comm 0x00000532 +#define AOFF_task_comm 0x0000053a #define ASIZ_task_comm 0x00000010 -#define AOFF_task_link_count 0x00000544 +#define AOFF_task_link_count 0x0000054c #define ASIZ_task_link_count 0x00000004 -#define AOFF_task_tty 0x00000548 +#define AOFF_task_tty 0x00000550 #define ASIZ_task_tty 0x00000008 -#define AOFF_task_semundo 0x00000550 +#define AOFF_task_semundo 0x00000558 #define ASIZ_task_semundo 0x00000008 -#define AOFF_task_semsleeping 0x00000558 +#define AOFF_task_semsleeping 0x00000560 #define ASIZ_task_semsleeping 0x00000008 -#define AOFF_task_tss 0x00000560 +#define AOFF_task_tss 0x00000570 #define ASIZ_task_tss 0x00000470 -#define AOFF_task_fs 0x000009d0 +#define AOFF_task_fs 0x000009e0 #define ASIZ_task_fs 0x00000008 -#define AOFF_task_files 0x000009d8 +#define AOFF_task_files 0x000009e8 #define ASIZ_task_files 0x00000008 -#define AOFF_task_mm 0x000009e0 +#define AOFF_task_mm 0x000009f0 #define ASIZ_task_mm 0x00000008 -#define AOFF_task_local_pages 0x000009e8 +#define AOFF_task_local_pages 0x000009f8 #define ASIZ_task_local_pages 0x00000010 -#define AOFF_task_allocation_order 0x000009f8 
+#define AOFF_task_allocation_order 0x00000a08 #define ASIZ_task_allocation_order 0x00000004 -#define AOFF_task_nr_local_pages 0x000009fc +#define AOFF_task_nr_local_pages 0x00000a0c #define ASIZ_task_nr_local_pages 0x00000004 -#define AOFF_task_fs_locks 0x00000a00 +#define AOFF_task_fs_locks 0x00000a10 #define ASIZ_task_fs_locks 0x00000004 -#define AOFF_task_sigmask_lock 0x00000a04 +#define AOFF_task_sigmask_lock 0x00000a14 #define ASIZ_task_sigmask_lock 0x00000001 -#define AOFF_task_sig 0x00000a08 +#define AOFF_task_sig 0x00000a18 #define ASIZ_task_sig 0x00000008 -#define AOFF_task_signal 0x00000a10 +#define AOFF_task_signal 0x00000a20 #define ASIZ_task_signal 0x00000008 -#define AOFF_task_blocked 0x00000a18 +#define AOFF_task_blocked 0x00000a28 #define ASIZ_task_blocked 0x00000008 -#define AOFF_task_sigqueue 0x00000a20 +#define AOFF_task_sigqueue 0x00000a30 #define ASIZ_task_sigqueue 0x00000008 -#define AOFF_task_sigqueue_tail 0x00000a28 +#define AOFF_task_sigqueue_tail 0x00000a38 #define ASIZ_task_sigqueue_tail 0x00000008 -#define AOFF_task_sas_ss_sp 0x00000a30 +#define AOFF_task_sas_ss_sp 0x00000a40 #define ASIZ_task_sas_ss_sp 0x00000008 -#define AOFF_task_sas_ss_size 0x00000a38 +#define AOFF_task_sas_ss_size 0x00000a48 #define ASIZ_task_sas_ss_size 0x00000008 -#define AOFF_task_parent_exec_id 0x00000a40 +#define AOFF_task_parent_exec_id 0x00000a50 #define ASIZ_task_parent_exec_id 0x00000004 -#define AOFF_task_self_exec_id 0x00000a44 +#define AOFF_task_self_exec_id 0x00000a54 #define ASIZ_task_self_exec_id 0x00000004 -#define AOFF_task_oom_kill_try 0x00000a48 +#define AOFF_task_oom_kill_try 0x00000a58 #define ASIZ_task_oom_kill_try 0x00000004 -#define ASIZ_task 0x00000a50 +#define ASIZ_task 0x00000a60 #define AOFF_mm_mmap 0x00000000 #define ASIZ_mm_mmap 0x00000008 #define AOFF_mm_mmap_avl 0x00000008 @@ -632,181 +636,183 @@ #define ASIZ_task_priority 0x00000008 #define AOFF_task_avg_slice 0x00000048 #define ASIZ_task_avg_slice 0x00000008 -#define AOFF_task_has_cpu 0x00000050 +#define AOFF_task_counter_refresh 0x00000050 +#define ASIZ_task_counter_refresh 0x00000004 +#define AOFF_task_has_cpu 0x00000054 #define ASIZ_task_has_cpu 0x00000004 -#define AOFF_task_processor 0x00000054 +#define AOFF_task_processor 0x00000058 #define ASIZ_task_processor 0x00000004 -#define AOFF_task_last_processor 0x00000058 +#define AOFF_task_last_processor 0x0000005c #define ASIZ_task_last_processor 0x00000004 -#define AOFF_task_lock_depth 0x0000005c +#define AOFF_task_lock_depth 0x00000060 #define ASIZ_task_lock_depth 0x00000004 -#define AOFF_task_next_task 0x00000060 +#define AOFF_task_next_task 0x00000068 #define ASIZ_task_next_task 0x00000008 -#define AOFF_task_prev_task 0x00000068 +#define AOFF_task_prev_task 0x00000070 #define ASIZ_task_prev_task 0x00000008 -#define AOFF_task_next_run 0x00000070 +#define AOFF_task_next_run 0x00000078 #define ASIZ_task_next_run 0x00000008 -#define AOFF_task_prev_run 0x00000078 +#define AOFF_task_prev_run 0x00000080 #define ASIZ_task_prev_run 0x00000008 -#define AOFF_task_task_exclusive 0x00000080 +#define AOFF_task_task_exclusive 0x00000088 #define ASIZ_task_task_exclusive 0x00000004 -#define AOFF_task_binfmt 0x00000088 +#define AOFF_task_binfmt 0x00000090 #define ASIZ_task_binfmt 0x00000008 -#define AOFF_task_exit_code 0x00000090 +#define AOFF_task_exit_code 0x00000098 #define ASIZ_task_exit_code 0x00000004 -#define AOFF_task_exit_signal 0x00000094 +#define AOFF_task_exit_signal 0x0000009c #define ASIZ_task_exit_signal 0x00000004 -#define AOFF_task_pdeath_signal 
0x00000098 +#define AOFF_task_pdeath_signal 0x000000a0 #define ASIZ_task_pdeath_signal 0x00000004 -#define AOFF_task_personality 0x000000a0 +#define AOFF_task_personality 0x000000a8 #define ASIZ_task_personality 0x00000008 -#define AOFF_task_pid 0x000000ac +#define AOFF_task_pid 0x000000b4 #define ASIZ_task_pid 0x00000004 -#define AOFF_task_pgrp 0x000000b0 +#define AOFF_task_pgrp 0x000000b8 #define ASIZ_task_pgrp 0x00000004 -#define AOFF_task_tty_old_pgrp 0x000000b4 +#define AOFF_task_tty_old_pgrp 0x000000bc #define ASIZ_task_tty_old_pgrp 0x00000004 -#define AOFF_task_session 0x000000b8 +#define AOFF_task_session 0x000000c0 #define ASIZ_task_session 0x00000004 -#define AOFF_task_leader 0x000000bc +#define AOFF_task_leader 0x000000c4 #define ASIZ_task_leader 0x00000004 -#define AOFF_task_p_opptr 0x000000c0 +#define AOFF_task_p_opptr 0x000000c8 #define ASIZ_task_p_opptr 0x00000008 -#define AOFF_task_p_pptr 0x000000c8 +#define AOFF_task_p_pptr 0x000000d0 #define ASIZ_task_p_pptr 0x00000008 -#define AOFF_task_p_cptr 0x000000d0 +#define AOFF_task_p_cptr 0x000000d8 #define ASIZ_task_p_cptr 0x00000008 -#define AOFF_task_p_ysptr 0x000000d8 +#define AOFF_task_p_ysptr 0x000000e0 #define ASIZ_task_p_ysptr 0x00000008 -#define AOFF_task_p_osptr 0x000000e0 +#define AOFF_task_p_osptr 0x000000e8 #define ASIZ_task_p_osptr 0x00000008 -#define AOFF_task_pidhash_next 0x000000e8 +#define AOFF_task_pidhash_next 0x000000f0 #define ASIZ_task_pidhash_next 0x00000008 -#define AOFF_task_pidhash_pprev 0x000000f0 +#define AOFF_task_pidhash_pprev 0x000000f8 #define ASIZ_task_pidhash_pprev 0x00000008 -#define AOFF_task_tarray_ptr 0x000000f8 +#define AOFF_task_tarray_ptr 0x00000100 #define ASIZ_task_tarray_ptr 0x00000008 -#define AOFF_task_wait_chldexit 0x00000100 +#define AOFF_task_wait_chldexit 0x00000108 #define ASIZ_task_wait_chldexit 0x00000008 -#define AOFF_task_vfork_sem 0x00000108 +#define AOFF_task_vfork_sem 0x00000110 #define ASIZ_task_vfork_sem 0x00000008 -#define AOFF_task_policy 0x00000110 +#define AOFF_task_policy 0x00000118 #define ASIZ_task_policy 0x00000008 -#define AOFF_task_rt_priority 0x00000118 +#define AOFF_task_rt_priority 0x00000120 #define ASIZ_task_rt_priority 0x00000008 -#define AOFF_task_it_real_value 0x00000120 +#define AOFF_task_it_real_value 0x00000128 #define ASIZ_task_it_real_value 0x00000008 -#define AOFF_task_it_prof_value 0x00000128 +#define AOFF_task_it_prof_value 0x00000130 #define ASIZ_task_it_prof_value 0x00000008 -#define AOFF_task_it_virt_value 0x00000130 +#define AOFF_task_it_virt_value 0x00000138 #define ASIZ_task_it_virt_value 0x00000008 -#define AOFF_task_it_real_incr 0x00000138 +#define AOFF_task_it_real_incr 0x00000140 #define ASIZ_task_it_real_incr 0x00000008 -#define AOFF_task_it_prof_incr 0x00000140 +#define AOFF_task_it_prof_incr 0x00000148 #define ASIZ_task_it_prof_incr 0x00000008 -#define AOFF_task_it_virt_incr 0x00000148 +#define AOFF_task_it_virt_incr 0x00000150 #define ASIZ_task_it_virt_incr 0x00000008 -#define AOFF_task_real_timer 0x00000150 +#define AOFF_task_real_timer 0x00000158 #define ASIZ_task_real_timer 0x00000028 -#define AOFF_task_times 0x00000178 +#define AOFF_task_times 0x00000180 #define ASIZ_task_times 0x00000020 -#define AOFF_task_start_time 0x00000198 +#define AOFF_task_start_time 0x000001a0 #define ASIZ_task_start_time 0x00000008 -#define AOFF_task_per_cpu_utime 0x000001a0 +#define AOFF_task_per_cpu_utime 0x000001a8 #define ASIZ_task_per_cpu_utime 0x00000100 -#define AOFF_task_min_flt 0x000003a0 +#define AOFF_task_min_flt 0x000003a8 #define 
ASIZ_task_min_flt 0x00000008 -#define AOFF_task_maj_flt 0x000003a8 +#define AOFF_task_maj_flt 0x000003b0 #define ASIZ_task_maj_flt 0x00000008 -#define AOFF_task_nswap 0x000003b0 +#define AOFF_task_nswap 0x000003b8 #define ASIZ_task_nswap 0x00000008 -#define AOFF_task_cmin_flt 0x000003b8 +#define AOFF_task_cmin_flt 0x000003c0 #define ASIZ_task_cmin_flt 0x00000008 -#define AOFF_task_cmaj_flt 0x000003c0 +#define AOFF_task_cmaj_flt 0x000003c8 #define ASIZ_task_cmaj_flt 0x00000008 -#define AOFF_task_cnswap 0x000003c8 +#define AOFF_task_cnswap 0x000003d0 #define ASIZ_task_cnswap 0x00000008 -#define AOFF_task_uid 0x000003d4 +#define AOFF_task_uid 0x000003dc #define ASIZ_task_uid 0x00000004 -#define AOFF_task_euid 0x000003d8 +#define AOFF_task_euid 0x000003e0 #define ASIZ_task_euid 0x00000004 -#define AOFF_task_suid 0x000003dc +#define AOFF_task_suid 0x000003e4 #define ASIZ_task_suid 0x00000004 -#define AOFF_task_fsuid 0x000003e0 +#define AOFF_task_fsuid 0x000003e8 #define ASIZ_task_fsuid 0x00000004 -#define AOFF_task_gid 0x000003e4 +#define AOFF_task_gid 0x000003ec #define ASIZ_task_gid 0x00000004 -#define AOFF_task_egid 0x000003e8 +#define AOFF_task_egid 0x000003f0 #define ASIZ_task_egid 0x00000004 -#define AOFF_task_sgid 0x000003ec +#define AOFF_task_sgid 0x000003f4 #define ASIZ_task_sgid 0x00000004 -#define AOFF_task_fsgid 0x000003f0 +#define AOFF_task_fsgid 0x000003f8 #define ASIZ_task_fsgid 0x00000004 -#define AOFF_task_ngroups 0x000003f4 +#define AOFF_task_ngroups 0x000003fc #define ASIZ_task_ngroups 0x00000004 -#define AOFF_task_groups 0x000003f8 +#define AOFF_task_groups 0x00000400 #define ASIZ_task_groups 0x00000080 -#define AOFF_task_cap_effective 0x00000478 +#define AOFF_task_cap_effective 0x00000480 #define ASIZ_task_cap_effective 0x00000004 -#define AOFF_task_cap_inheritable 0x0000047c +#define AOFF_task_cap_inheritable 0x00000484 #define ASIZ_task_cap_inheritable 0x00000004 -#define AOFF_task_cap_permitted 0x00000480 +#define AOFF_task_cap_permitted 0x00000488 #define ASIZ_task_cap_permitted 0x00000004 -#define AOFF_task_user 0x00000488 +#define AOFF_task_user 0x00000490 #define ASIZ_task_user 0x00000008 -#define AOFF_task_rlim 0x00000490 +#define AOFF_task_rlim 0x00000498 #define ASIZ_task_rlim 0x000000a0 -#define AOFF_task_used_math 0x00000530 +#define AOFF_task_used_math 0x00000538 #define ASIZ_task_used_math 0x00000002 -#define AOFF_task_comm 0x00000532 +#define AOFF_task_comm 0x0000053a #define ASIZ_task_comm 0x00000010 -#define AOFF_task_link_count 0x00000544 +#define AOFF_task_link_count 0x0000054c #define ASIZ_task_link_count 0x00000004 -#define AOFF_task_tty 0x00000548 +#define AOFF_task_tty 0x00000550 #define ASIZ_task_tty 0x00000008 -#define AOFF_task_semundo 0x00000550 +#define AOFF_task_semundo 0x00000558 #define ASIZ_task_semundo 0x00000008 -#define AOFF_task_semsleeping 0x00000558 +#define AOFF_task_semsleeping 0x00000560 #define ASIZ_task_semsleeping 0x00000008 -#define AOFF_task_tss 0x00000560 +#define AOFF_task_tss 0x00000570 #define ASIZ_task_tss 0x00000470 -#define AOFF_task_fs 0x000009d0 +#define AOFF_task_fs 0x000009e0 #define ASIZ_task_fs 0x00000008 -#define AOFF_task_files 0x000009d8 +#define AOFF_task_files 0x000009e8 #define ASIZ_task_files 0x00000008 -#define AOFF_task_mm 0x000009e0 +#define AOFF_task_mm 0x000009f0 #define ASIZ_task_mm 0x00000008 -#define AOFF_task_local_pages 0x000009e8 +#define AOFF_task_local_pages 0x000009f8 #define ASIZ_task_local_pages 0x00000010 -#define AOFF_task_allocation_order 0x000009f8 +#define AOFF_task_allocation_order 
0x00000a08 #define ASIZ_task_allocation_order 0x00000004 -#define AOFF_task_nr_local_pages 0x000009fc +#define AOFF_task_nr_local_pages 0x00000a0c #define ASIZ_task_nr_local_pages 0x00000004 -#define AOFF_task_fs_locks 0x00000a00 +#define AOFF_task_fs_locks 0x00000a10 #define ASIZ_task_fs_locks 0x00000004 -#define AOFF_task_sigmask_lock 0x00000a04 +#define AOFF_task_sigmask_lock 0x00000a14 #define ASIZ_task_sigmask_lock 0x0000000c -#define AOFF_task_sig 0x00000a10 +#define AOFF_task_sig 0x00000a20 #define ASIZ_task_sig 0x00000008 -#define AOFF_task_signal 0x00000a18 +#define AOFF_task_signal 0x00000a28 #define ASIZ_task_signal 0x00000008 -#define AOFF_task_blocked 0x00000a20 +#define AOFF_task_blocked 0x00000a30 #define ASIZ_task_blocked 0x00000008 -#define AOFF_task_sigqueue 0x00000a28 +#define AOFF_task_sigqueue 0x00000a38 #define ASIZ_task_sigqueue 0x00000008 -#define AOFF_task_sigqueue_tail 0x00000a30 +#define AOFF_task_sigqueue_tail 0x00000a40 #define ASIZ_task_sigqueue_tail 0x00000008 -#define AOFF_task_sas_ss_sp 0x00000a38 +#define AOFF_task_sas_ss_sp 0x00000a48 #define ASIZ_task_sas_ss_sp 0x00000008 -#define AOFF_task_sas_ss_size 0x00000a40 +#define AOFF_task_sas_ss_size 0x00000a50 #define ASIZ_task_sas_ss_size 0x00000008 -#define AOFF_task_parent_exec_id 0x00000a48 +#define AOFF_task_parent_exec_id 0x00000a58 #define ASIZ_task_parent_exec_id 0x00000004 -#define AOFF_task_self_exec_id 0x00000a4c +#define AOFF_task_self_exec_id 0x00000a5c #define ASIZ_task_self_exec_id 0x00000004 -#define AOFF_task_oom_kill_try 0x00000a50 +#define AOFF_task_oom_kill_try 0x00000a60 #define ASIZ_task_oom_kill_try 0x00000004 -#define ASIZ_task 0x00000a60 +#define ASIZ_task 0x00000a70 #define AOFF_mm_mmap 0x00000000 #define ASIZ_mm_mmap 0x00000008 #define AOFF_mm_mmap_avl 0x00000008 diff -urN linux.orig/include/asm-sparc64/poll.h linux/include/asm-sparc64/poll.h --- linux.orig/include/asm-sparc64/poll.h Sun Mar 25 18:31:09 2001 +++ linux/include/asm-sparc64/poll.h Wed Aug 7 17:45:23 2002 @@ -11,6 +11,7 @@ #define POLLWRNORM POLLOUT #define POLLRDBAND 128 #define POLLWRBAND 256 +#define POLLMSG 512 struct pollfd { int fd; diff -urN linux.orig/include/linux/elfcore.h linux/include/linux/elfcore.h --- linux.orig/include/linux/elfcore.h Sun Mar 25 18:31:04 2001 +++ linux/include/linux/elfcore.h Wed Aug 7 17:44:43 2002 @@ -20,6 +20,7 @@ typedef elf_greg_t greg_t; typedef elf_gregset_t gregset_t; typedef elf_fpregset_t fpregset_t; +typedef elf_fpxregset_t fpxregset_t; #define NGREG ELF_NGREG #endif diff -urN linux.orig/include/linux/fs.h linux/include/linux/fs.h --- linux.orig/include/linux/fs.h Fri Nov 2 17:39:09 2001 +++ linux/include/linux/fs.h Wed Aug 7 17:46:35 2002 @@ -740,7 +740,7 @@ #define __getname() ((char *) __get_free_page(GFP_KERNEL)) #define putname(name) free_page((unsigned long)(name)) -extern void kill_fasync(struct fasync_struct *fa, int sig); +extern void kill_fasync(struct fasync_struct *fa, int sig, int band); extern int register_blkdev(unsigned int, const char *, struct file_operations *); extern int unregister_blkdev(unsigned int major, const char * name); extern int blkdev_open(struct inode * inode, struct file * filp); @@ -810,7 +810,6 @@ if (test_and_clear_bit(BH_Dirty, &bh->b_state)) { if (bh->b_list == BUF_DIRTY) refile_buffer(bh); - clear_bit(BH_Wait_IO, &bh->b_state); } } diff -urN linux.orig/include/linux/kernel.h linux/include/linux/kernel.h --- linux.orig/include/linux/kernel.h Sun Mar 25 18:31:03 2001 +++ linux/include/linux/kernel.h Wed Aug 7 17:44:43 2002 @@ -40,7 +40,6 @@ 
#define FASTCALL(x) x #endif -extern void math_error(void); extern struct notifier_block *panic_notifier_list; NORET_TYPE void panic(const char * fmt, ...) __attribute__ ((NORET_AND format (printf, 1, 2))); diff -urN linux.orig/include/linux/locks.h linux/include/linux/locks.h --- linux.orig/include/linux/locks.h Sun Mar 25 18:37:40 2001 +++ linux/include/linux/locks.h Wed Aug 7 17:46:35 2002 @@ -29,6 +29,7 @@ extern inline void unlock_buffer(struct buffer_head *bh) { clear_bit(BH_Lock, &bh->b_state); + clear_bit(BH_Wait_IO, &bh->b_state); wake_up(&bh->b_wait); } diff -urN linux.orig/include/linux/mm.h linux/include/linux/mm.h --- linux.orig/include/linux/mm.h Sun Mar 25 18:31:03 2001 +++ linux/include/linux/mm.h Wed Aug 7 17:57:18 2002 @@ -81,6 +81,8 @@ #define VM_LOCKED 0x2000 #define VM_IO 0x4000 /* Memory mapped I/O or similar */ +#define VM_RESERVED 0x8000 /* Don't unmap it from swap_out */ + #define VM_STACK_FLAGS 0x0177 /* @@ -347,13 +349,18 @@ #define GFP_DMA __GFP_DMA +extern int heap_stack_gap; + /* vma is the first one with address < vma->vm_end, * and even address < vma->vm_start. Have to extend vma. */ -static inline int expand_stack(struct vm_area_struct * vma, unsigned long address) +static inline int expand_stack(struct vm_area_struct * vma, unsigned long address, + struct vm_area_struct * prev_vma) { unsigned long grow; address &= PAGE_MASK; + if (prev_vma && prev_vma->vm_end + (heap_stack_gap << PAGE_SHIFT) > address) + return -ENOMEM; grow = vma->vm_start - address; if ((vma->vm_end - address > current->rlim[RLIMIT_STACK].rlim_cur) || @@ -371,6 +378,8 @@ /* Look up the first VMA which satisfies addr < vm_end, NULL if none. */ extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long addr); +extern struct vm_area_struct * find_vma_prev(struct mm_struct *, unsigned long, + struct vm_area_struct **); /* Look up the first VMA which intersects the interval start_addr..end_addr-1, NULL if none. Assume start_addr < end_addr. 
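The expand_stack() change in the mm.h hunk above refuses to grow the stack down into a configurable gap of heap_stack_gap pages below the previous mapping. A minimal userspace model of that check, assuming a 4 KiB page size and the patch's default gap of one page; the function and variable names here are illustrative, not the kernel's:

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_MASK  (~((1UL << PAGE_SHIFT) - 1))

/* Mirrors the new test: refuse to extend the stack vma downward if that
 * would leave less than heap_stack_gap pages above the previous mapping. */
static int may_expand_stack(unsigned long prev_vma_end,
                            unsigned long fault_address,
                            int heap_stack_gap)
{
	fault_address &= PAGE_MASK;
	if (prev_vma_end + ((unsigned long)heap_stack_gap << PAGE_SHIFT) > fault_address)
		return -1;	/* would erase the gap: the kernel returns -ENOMEM */
	return 0;		/* safe to pull vma->vm_start down to fault_address */
}

int main(void)
{
	/* heap ends at 0x60000000; two hypothetical stack faults below it */
	printf("%d\n", may_expand_stack(0x60000000UL, 0x60000800UL, 1)); /* -1: would erase the gap   */
	printf("%d\n", may_expand_stack(0x60000000UL, 0x60001800UL, 1)); /*  0: one guard page remains */
	return 0;
}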
*/ diff -urN linux.orig/include/linux/net.h linux/include/linux/net.h --- linux.orig/include/linux/net.h Sun Mar 25 18:31:03 2001 +++ linux/include/linux/net.h Wed Aug 7 17:45:23 2002 @@ -126,7 +126,7 @@ }; extern struct net_proto_family *net_families[]; -extern int sock_wake_async(struct socket *sk, int how); +extern int sock_wake_async(struct socket *sk, int how, int band); extern int sock_register(struct net_proto_family *fam); extern int sock_unregister(int family); extern struct socket *sock_alloc(void); diff -urN linux.orig/include/linux/sched.h linux/include/linux/sched.h --- linux.orig/include/linux/sched.h Fri Nov 2 17:39:09 2001 +++ linux/include/linux/sched.h Wed Aug 7 17:55:29 2002 @@ -260,6 +260,7 @@ long counter; long priority; cycles_t avg_slice; + int counter_refresh; /* SMP and runqueue state */ int has_cpu; int processor; @@ -394,7 +395,7 @@ */ #define INIT_TASK \ /* state etc */ { 0,0,0,KERNEL_DS,&default_exec_domain,0,0, \ -/* counter */ DEF_PRIORITY,DEF_PRIORITY,0, \ +/* counter */ DEF_PRIORITY,DEF_PRIORITY,0,0, \ /* SMP */ 0,0,0,-1, \ /* schedlink */ &init_task,&init_task, &init_task, &init_task, \ /* task_exclusive */ 0, \ diff -urN linux.orig/include/linux/sysctl.h linux/include/linux/sysctl.h --- linux.orig/include/linux/sysctl.h Sun Mar 25 18:37:40 2001 +++ linux/include/linux/sysctl.h Wed Aug 7 17:57:18 2002 @@ -123,7 +123,8 @@ VM_PAGECACHE=7, /* struct: Set cache memory thresholds */ VM_PAGERDAEMON=8, /* struct: Control kswapd behaviour */ VM_PGT_CACHE=9, /* struct: Set page table cache parameters */ - VM_PAGE_CLUSTER=10 /* int: set number of pages to swap together */ + VM_PAGE_CLUSTER=10, /* int: set number of pages to swap together */ + VM_HEAP_STACK_GAP=11, /* int: page gap between heap and stack */ }; diff -urN linux.orig/include/linux/time.h linux/include/linux/time.h --- linux.orig/include/linux/time.h Sun Mar 25 18:31:03 2001 +++ linux/include/linux/time.h Wed Aug 7 17:53:48 2002 @@ -46,11 +46,54 @@ value->tv_sec = jiffies / HZ; } +static __inline__ int +timespec_before(struct timespec a, struct timespec b) +{ + if (a.tv_sec == b.tv_sec) + return a.tv_nsec < b.tv_nsec; + return a.tv_sec < b.tv_sec; +} + +/* computes `a - b' and write the result in `result', assumes `a >= b' */ +static inline void +timespec_less(struct timespec a, struct timespec b, struct timespec * result) +{ + if (a.tv_nsec < b.tv_nsec) + { + a.tv_sec--; + a.tv_nsec += 1000000000; + } + + result->tv_sec = a.tv_sec - b.tv_sec; + result->tv_nsec = a.tv_nsec - b.tv_nsec; +} + struct timeval { time_t tv_sec; /* seconds */ suseconds_t tv_usec; /* microseconds */ }; +/* computes `a - b' and write the result in `result', assumes `a >= b' */ +static inline void +timeval_less(struct timeval a, struct timeval b, struct timeval * result) +{ + if (a.tv_usec < b.tv_usec) + { + a.tv_sec--; + a.tv_usec += 1000000; + } + + result->tv_sec = a.tv_sec - b.tv_sec; + result->tv_usec = a.tv_usec - b.tv_usec; +} + +static __inline__ void +timeval_to_timespec(struct timeval tv, struct timespec * ts) +{ + ts->tv_sec = tv.tv_sec; + ts->tv_nsec = (long) tv.tv_usec * 1000; +} + struct timezone { int tz_minuteswest; /* minutes west of Greenwich */ int tz_dsttime; /* type of dst correction */ diff -urN linux.orig/include/linux/wrapper.h linux/include/linux/wrapper.h --- linux.orig/include/linux/wrapper.h Sun Mar 25 18:31:04 2001 +++ linux/include/linux/wrapper.h Wed Aug 7 17:46:42 2002 @@ -33,6 +33,14 @@ #define vma_get_end(v) v->vm_end #define vma_get_page_prot(v) v->vm_page_prot +/* + * 
mem_map_reserve()/unreserve() are going to be obsoleted by + * setting the VM_RESERVED in vma->vm_flags. + * + * Instead of marking the pages as reserved, just mark the vma as reserved + * this will improve performance (it's zero cost unlike the PG_reserved check) + * and it will be trivial for not physically contigous mappings too. + */ #define mem_map_reserve(p) set_bit(PG_reserved, &mem_map[p].flags) #define mem_map_unreserve(p) clear_bit(PG_reserved, &mem_map[p].flags) #define mem_map_inc_count(p) atomic_inc(&(mem_map[p].count)) diff -urN linux.orig/kernel/exit.c linux/kernel/exit.c --- linux.orig/kernel/exit.c Fri Nov 2 17:39:16 2001 +++ linux/kernel/exit.c Wed Aug 7 17:55:29 2002 @@ -56,6 +56,17 @@ current->cmin_flt += p->min_flt + p->cmin_flt; current->cmaj_flt += p->maj_flt + p->cmaj_flt; current->cnswap += p->nswap + p->cnswap; + /* + * Potentially available timeslices are retrieved + * here - this way the parent does not get penalized + * for creating too many processes. + * + * (this cannot be used to artificially 'generate' + * timeslices, because any timeslice recovered here + * was given away by the parent in the first place.) + */ + if (!p->counter_refresh) + current->counter += p->counter; free_task_struct(p); } else { printk("task releasing itself\n"); @@ -150,6 +161,7 @@ p->exit_signal = SIGCHLD; p->self_exec_id++; p->p_opptr = child_reaper; /* init */ + p->counter_refresh = 1; if (p->pdeath_signal) send_sig(p->pdeath_signal, p, 0); } } diff -urN linux.orig/kernel/fork.c linux/kernel/fork.c --- linux.orig/kernel/fork.c Fri Nov 2 17:39:16 2001 +++ linux/kernel/fork.c Wed Aug 7 17:55:29 2002 @@ -700,6 +700,8 @@ */ current->counter >>= 1; p->counter = current->counter; + /* Tell the parent if it can get back its timeslice when child exits */ + p->counter_refresh = 0; /* * Ok, add it to the run-queues and make it diff -urN linux.orig/kernel/sched.c linux/kernel/sched.c --- linux.orig/kernel/sched.c Sun Mar 25 18:37:40 2001 +++ linux/kernel/sched.c Wed Aug 7 17:55:29 2002 @@ -212,101 +212,89 @@ } /* - * If there is a dependency between p1 and p2, - * don't be too eager to go into the slow schedule. - * In particular, if p1 and p2 both want the kernel - * lock, there is no point in trying to make them - * extremely parallel.. - * - * (No lock - lock_depth < 0) - * - * There are two additional metrics here: - * - * first, a 'cutoff' interval, currently 0-200 usecs on - * x86 CPUs, depending on the size of the 'SMP-local cache'. - * If the current process has longer average timeslices than - * this, then we utilize the idle CPU. - * - * second, if the wakeup comes from a process context, - * then the two processes are 'related'. (they form a - * 'gang') - * - * An idle CPU is almost always a bad thing, thus we skip - * the idle-CPU utilization only if both these conditions - * are true. (ie. a 'process-gang' rescheduling with rather - * high frequency should stay on the same CPU). - * - * [We can switch to something more finegrained in 2.3.] - * - * do not 'guess' if the to-be-scheduled task is RT. + * This is ugly, but reschedule_idle() is very timing-critical. + * We enter with the runqueue spinlock held, but we might end + * up unlocking it early, so the caller must not unlock the + * runqueue, it's always done by reschedule_idle(). 
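The kernel/exit.c and kernel/fork.c hunks above introduce the counter_refresh bookkeeping: a child starts with half of the parent's timeslice, and the parent may reclaim whatever the child leaves unused at exit, but only if the child never received a fresh slice from the global recalculation in schedule(), so no time can be manufactured. A small self-contained model of that accounting, with illustrative names:

#include <stdio.h>

struct task {
	long counter;          /* remaining timeslice, in ticks */
	int  counter_refresh;  /* set once the global recalculation refreshed it */
};

/* do_fork(): the child starts with half of the parent's remaining slice */
static void model_fork(struct task *parent, struct task *child)
{
	parent->counter >>= 1;
	child->counter = parent->counter;
	child->counter_refresh = 0;	/* parent may take this back on exit */
}

/* schedule()'s recalculation: a fresh slice, so nothing is owed back */
static void model_recalculate(struct task *t, long priority)
{
	t->counter = (t->counter >> 1) + priority;
	t->counter_refresh = 1;
}

/* release(): the parent reclaims the child's unused slice, if still owed */
static void model_release(struct task *parent, struct task *child)
{
	if (!child->counter_refresh)
		parent->counter += child->counter;
}

int main(void)
{
	struct task parent = { 20, 1 }, child;

	model_fork(&parent, &child);      /* parent: 10, child: 10             */
	child.counter = 4;                /* the child used part of its slice  */
	model_release(&parent, &child);   /* parent reclaims 4 -> 14           */
	printf("parent counter = %ld\n", parent.counter);

	model_fork(&parent, &child);      /* parent: 7, child: 7               */
	model_recalculate(&child, 20);    /* global refresh: nothing owed back */
	model_release(&parent, &child);   /* no reclaim, parent stays at 7     */
	printf("parent counter = %ld\n", parent.counter);
	return 0;
}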
*/ -#define related(p1,p2) (((p1)->lock_depth >= 0) && (p2)->lock_depth >= 0) && \ - (((p2)->policy == SCHED_OTHER) && ((p1)->avg_slice < cacheflush_time)) - -static inline void reschedule_idle_slow(struct task_struct * p) +static inline void reschedule_idle(struct task_struct * p, unsigned long flags) { #ifdef __SMP__ -/* - * (see reschedule_idle() for an explanation first ...) - * - * Pass #2 - * - * We try to find another (idle) CPU for this woken-up process. - * - * On SMP, we mostly try to see if the CPU the task used - * to run on is idle.. but we will use another idle CPU too, - * at this point we already know that this CPU is not - * willing to reschedule in the near future. - * - * An idle CPU is definitely wasted, especially if this CPU is - * running long-timeslice processes. The following algorithm is - * pretty good at finding the best idle CPU to send this process - * to. - * - * [We can try to preempt low-priority processes on other CPUs in - * 2.3. Also we can try to use the avg_slice value to predict - * 'likely reschedule' events even on other CPUs.] - */ int this_cpu = smp_processor_id(), target_cpu; - struct task_struct *tsk, *target_tsk; - int cpu, best_cpu, weight, best_weight, i; - unsigned long flags; - - best_weight = 0; /* prevents negative weight */ - - spin_lock_irqsave(&runqueue_lock, flags); + struct task_struct *tsk; + int cpu, best_cpu, i; /* * shortcut if the woken up task's last CPU is * idle now. */ best_cpu = p->processor; - target_tsk = idle_task(best_cpu); - if (cpu_curr(best_cpu) == target_tsk) + tsk = idle_task(best_cpu); + if (cpu_curr(best_cpu) == tsk) goto send_now; - target_tsk = NULL; - for (i = 0; i < smp_num_cpus; i++) { + /* + * We know that the preferred CPU has a cache-affine current + * process, lets try to find a new idle CPU for the woken-up + * process: + */ + for (i = smp_num_cpus - 1; i >= 0; i--) { cpu = cpu_logical_map(i); + if (cpu == best_cpu) + continue; tsk = cpu_curr(cpu); - if (related(tsk, p)) - goto out_no_target; - weight = preemption_goodness(tsk, p, cpu); - if (weight > best_weight) { - best_weight = weight; - target_tsk = tsk; - } + /* + * We use the last available idle CPU. This creates + * a priority list between idle CPUs, but this is not + * a problem. + */ + if (tsk == idle_task(cpu)) + goto send_now; } /* - * found any suitable CPU? + * No CPU is idle, but maybe this process has enough priority + * to preempt it's preferred CPU. + */ + tsk = cpu_curr(best_cpu); + if (preemption_goodness(tsk, p, best_cpu) > 0) + goto send_now; + + /* + * We will get here often - or in the high CPU contention + * case. No CPU is idle and this process is either lowprio or + * the preferred CPU is highprio. Try to preemt some other CPU + * only if it's RT or if it's iteractive and the preferred + * cpu won't reschedule shortly. 
*/ - if (!target_tsk) - goto out_no_target; + if ((p->avg_slice < cacheflush_time && cpu_curr(best_cpu)->avg_slice > cacheflush_time) || + ((p->policy & ~SCHED_YIELD) != SCHED_OTHER)) + { + int weight, best_weight = 0; + struct task_struct * best_tsk = NULL; + + for (i = smp_num_cpus - 1; i >= 0; i--) { + cpu = cpu_logical_map(i); + if (cpu == best_cpu) + continue; + tsk = cpu_curr(cpu); + weight = preemption_goodness(tsk, p, cpu); + if (weight > best_weight) { + best_weight = weight; + best_tsk = tsk; + } + } + + if ((tsk = best_tsk)) + goto send_now; + } + + spin_unlock_irqrestore(&runqueue_lock, flags); + return; send_now: - target_cpu = target_tsk->processor; - target_tsk->need_resched = 1; + target_cpu = tsk->processor; + tsk->need_resched = 1; spin_unlock_irqrestore(&runqueue_lock, flags); /* * the APIC stuff can go outside of the lock because @@ -315,9 +303,6 @@ if (target_cpu != this_cpu) smp_send_reschedule(target_cpu); return; -out_no_target: - spin_unlock_irqrestore(&runqueue_lock, flags); - return; #else /* UP */ int this_cpu = smp_processor_id(); struct task_struct *tsk; @@ -325,38 +310,10 @@ tsk = current; if (preemption_goodness(tsk, p, this_cpu) > 0) tsk->need_resched = 1; + spin_unlock_irqrestore(&runqueue_lock, flags); #endif } -static void reschedule_idle(struct task_struct * p) -{ -#ifdef __SMP__ - int cpu = smp_processor_id(); - /* - * ("wakeup()" should not be called before we've initialized - * SMP completely. - * Basically a not-yet initialized SMP subsystem can be - * considered as a not-yet working scheduler, simply dont use - * it before it's up and running ...) - * - * SMP rescheduling is done in 2 passes: - * - pass #1: faster: 'quick decisions' - * - pass #2: slower: 'lets try and find a suitable CPU' - */ - - /* - * Pass #1. (subtle. We might be in the middle of __switch_to, so - * to preserve scheduling atomicity we have to use cpu_curr) - */ - if ((p->processor == cpu) && related(cpu_curr(cpu), p)) - return; -#endif /* __SMP__ */ - /* - * Pass #2 - */ - reschedule_idle_slow(p); -} - /* * Careful! 
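The rewritten reschedule_idle() above collapses the old two-pass logic into one decision ladder: use the woken task's last CPU if it is idle, otherwise any other idle CPU, otherwise preempt the last CPU if the woken task beats what runs there, and only for RT or "interactive" (short average timeslice) wakeups go hunting for the best preemptable CPU elsewhere. A simplified standalone sketch of that ladder, with preemption_goodness() and the avg_slice/cacheflush_time test reduced to caller-supplied values; all names here are illustrative:

#include <stdio.h>

#define NCPUS 4

/* Pick the CPU that should be asked to reschedule for a woken task, or -1.
 * Order mirrors the new reschedule_idle():
 *   1. the task's last CPU, if it is idle;
 *   2. any other idle CPU (scanned from the top, so the last idle one wins);
 *   3. the last CPU, if the woken task can preempt what runs there;
 *   4. only for RT or interactive wakeups, the best preemptable other CPU. */
static int pick_target_cpu(int last_cpu,
                           const int idle[NCPUS],     /* 1 if that CPU runs its idle task */
                           const int goodness[NCPUS], /* preemption score vs. its current task */
                           int rt_or_interactive)
{
	int cpu, best = -1, best_weight = 0;

	if (idle[last_cpu])
		return last_cpu;

	for (cpu = NCPUS - 1; cpu >= 0; cpu--)
		if (cpu != last_cpu && idle[cpu])
			return cpu;

	if (goodness[last_cpu] > 0)
		return last_cpu;

	if (rt_or_interactive) {
		for (cpu = NCPUS - 1; cpu >= 0; cpu--) {
			if (cpu == last_cpu)
				continue;
			if (goodness[cpu] > best_weight) {
				best_weight = goodness[cpu];
				best = cpu;
			}
		}
	}
	return best;	/* -1: nobody needs to reschedule */
}

int main(void)
{
	int idle[NCPUS]     = { 0, 0, 1, 0 };
	int goodness[NCPUS] = { 0, 2, 0, 0 };

	printf("%d\n", pick_target_cpu(0, idle, goodness, 0));	/* 2: an idle CPU exists */
	return 0;
}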
* @@ -453,9 +410,8 @@ if (p->next_run) goto out; add_to_runqueue(p); - spin_unlock_irqrestore(&runqueue_lock, flags); + reschedule_idle(p, flags); // spin_unlocks runqueue - reschedule_idle(p); return; out: spin_unlock_irqrestore(&runqueue_lock, flags); @@ -668,9 +624,13 @@ { #ifdef __SMP__ if ((prev->state == TASK_RUNNING) && - (prev != idle_task(smp_processor_id()))) - reschedule_idle(prev); - wmb(); + (prev != idle_task(smp_processor_id()))) { + unsigned long flags; + + spin_lock_irqsave(&runqueue_lock, flags); + reschedule_idle(prev, flags); // spin_unlocks runqueue + } + mb(); prev->has_cpu = 0; #endif /* __SMP__ */ } @@ -841,8 +801,10 @@ struct task_struct *p; spin_unlock_irq(&runqueue_lock); read_lock(&tasklist_lock); - for_each_task(p) + for_each_task(p) { p->counter = (p->counter >> 1) + p->priority; + p->counter_refresh = 1; + } read_unlock(&tasklist_lock); spin_lock_irq(&runqueue_lock); goto repeat_schedule; @@ -1922,6 +1884,7 @@ { struct timespec t; unsigned long expire; + struct timeval before, after; if(copy_from_user(&t, rqtp, sizeof(struct timespec))) return -EFAULT; @@ -1954,11 +1917,20 @@ expire = timespec_to_jiffies(&t) + (t.tv_sec || t.tv_nsec); current->state = TASK_INTERRUPTIBLE; + get_fast_time(&before); expire = schedule_timeout(expire); + get_fast_time(&after); if (expire) { if (rmtp) { - jiffies_to_timespec(expire, &t); + struct timespec elapsed; + + timeval_less(after, before, &after); + timeval_to_timespec(after, &elapsed); + if (timespec_before(elapsed, t)) + timespec_less(t, elapsed, &t); + else + t.tv_nsec = t.tv_sec = 0; if (copy_to_user(rmtp, &t, sizeof(struct timespec))) return -EFAULT; } diff -urN linux.orig/kernel/sysctl.c linux/kernel/sysctl.c --- linux.orig/kernel/sysctl.c Sun Mar 25 18:37:40 2001 +++ linux/kernel/sysctl.c Wed Aug 7 17:57:18 2002 @@ -272,6 +272,8 @@ &pgt_cache_water, 2*sizeof(int), 0600, NULL, &proc_dointvec}, {VM_PAGE_CLUSTER, "page-cluster", &page_cluster, sizeof(int), 0600, NULL, &proc_dointvec}, + {VM_HEAP_STACK_GAP, "heap-stack-gap", + &heap_stack_gap, sizeof(int), 0644, NULL, &proc_dointvec}, {0} }; diff -urN linux.orig/mm/filemap.c linux/mm/filemap.c --- linux.orig/mm/filemap.c Sun Mar 25 18:37:40 2001 +++ linux/mm/filemap.c Wed Aug 7 17:56:23 2002 @@ -338,13 +338,13 @@ wait.task = tsk; add_wait_queue(&page->wait, &wait); -repeat: - tsk->state = TASK_UNINTERRUPTIBLE; - sync_page(page); - if (PageLocked(page)) { + do { + set_current_state(TASK_UNINTERRUPTIBLE); + sync_page(page); + if (!PageLocked(page)) + break; schedule(); - goto repeat; - } + } while (PageLocked(page)); tsk->state = TASK_RUNNING; remove_wait_queue(&page->wait, &wait); } @@ -560,9 +560,11 @@ * accessed sequentially. */ if (ahead) { +#if 0 if (reada_ok == 2) { run_task_queue(&tq_disk); } +#endif filp->f_ralen += ahead; filp->f_rawin += filp->f_ralen; diff -urN linux.orig/mm/mmap.c linux/mm/mmap.c --- linux.orig/mm/mmap.c Sun Mar 25 18:31:02 2001 +++ linux/mm/mmap.c Wed Aug 7 17:57:18 2002 @@ -40,6 +40,7 @@ kmem_cache_t *vm_area_cachep; int sysctl_overcommit_memory; +int heap_stack_gap = 1; /* Check that a process has enough memory to allocate a * new virtual mapping. @@ -66,7 +67,6 @@ free += page_cache_size; free += nr_free_pages; free += nr_swap_pages; - free -= (page_cache.min_percent + buffer_mem.min_percent + 2)*num_physpages/100; return free > pages; } @@ -371,9 +371,14 @@ for (vmm = find_vma(current->mm, addr); ; vmm = vmm->vm_next) { /* At this point: (!vmm || addr < vmm->vm_end). 
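The sys_nanosleep() hunk above stops reconstructing the unslept time from the returned jiffies count and instead measures real elapsed time around the wait, using the helpers added to <linux/time.h>. A userspace model of the same arithmetic; the helpers are re-declared locally so the example stands alone, and usleep() stands in for an interrupted sleep:

#include <stdio.h>
#include <sys/time.h>
#include <time.h>
#include <unistd.h>

/* same shape as the helpers added to <linux/time.h>: a - b, assuming a >= b */
static void timeval_less(struct timeval a, struct timeval b, struct timeval *r)
{
	if (a.tv_usec < b.tv_usec) {
		a.tv_sec--;
		a.tv_usec += 1000000;
	}
	r->tv_sec = a.tv_sec - b.tv_sec;
	r->tv_usec = a.tv_usec - b.tv_usec;
}

static void timespec_less(struct timespec a, struct timespec b, struct timespec *r)
{
	if (a.tv_nsec < b.tv_nsec) {
		a.tv_sec--;
		a.tv_nsec += 1000000000;
	}
	r->tv_sec = a.tv_sec - b.tv_sec;
	r->tv_nsec = a.tv_nsec - b.tv_nsec;
}

static int timespec_before(struct timespec a, struct timespec b)
{
	if (a.tv_sec == b.tv_sec)
		return a.tv_nsec < b.tv_nsec;
	return a.tv_sec < b.tv_sec;
}

int main(void)
{
	struct timespec req = { 2, 0 };		/* asked to sleep 2 seconds */
	struct timeval before, after, delta;
	struct timespec elapsed, remain = req;

	gettimeofday(&before, NULL);
	usleep(300 * 1000);			/* "interrupted" after ~0.3 s */
	gettimeofday(&after, NULL);

	timeval_less(after, before, &delta);
	elapsed.tv_sec = delta.tv_sec;		/* timeval_to_timespec() */
	elapsed.tv_nsec = delta.tv_usec * 1000;
	if (timespec_before(elapsed, req))
		timespec_less(req, elapsed, &remain);
	else
		remain.tv_sec = remain.tv_nsec = 0;

	printf("remaining: %ld.%09ld s\n", (long)remain.tv_sec, remain.tv_nsec);
	return 0;
}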
*/ + unsigned long __heap_stack_gap = 0; if (TASK_SIZE - len < addr) return 0; - if (!vmm || addr + len <= vmm->vm_start) + if (!vmm) + return addr; + if (vmm->vm_flags & VM_GROWSDOWN) + __heap_stack_gap = heap_stack_gap << PAGE_SHIFT; + if (addr + len + __heap_stack_gap <= vmm->vm_start) return addr; addr = vmm->vm_end; } diff -urN linux.orig/mm/mremap.c linux/mm/mremap.c --- linux.orig/mm/mremap.c Sun Mar 25 18:31:03 2001 +++ linux/mm/mremap.c Wed Aug 7 17:53:05 2002 @@ -127,7 +127,7 @@ new_vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL); if (new_vma) { - unsigned long new_addr = get_unmapped_area(addr, new_len); + unsigned long new_addr = get_unmapped_area(0, new_len); if (new_addr && !move_page_tables(current->mm, new_addr, addr, old_len)) { *new_vma = *vma; diff -urN linux.orig/mm/swap.c linux/mm/swap.c --- linux.orig/mm/swap.c Sun Mar 25 18:31:02 2001 +++ linux/mm/swap.c Wed Aug 7 17:43:00 2002 @@ -47,13 +47,13 @@ atomic_t nr_async_pages = ATOMIC_INIT(0); buffer_mem_t buffer_mem = { - 2, /* minimum percent buffer */ + 4, /* minimum percent buffer */ 10, /* borrow percent buffer */ 60 /* maximum percent buffer */ }; buffer_mem_t page_cache = { - 2, /* minimum percent page cache */ + 4, /* minimum percent page cache */ 15, /* borrow percent page cache */ 75 /* maximum */ }; diff -urN linux.orig/mm/vmscan.c linux/mm/vmscan.c --- linux.orig/mm/vmscan.c Tue May 21 01:32:35 2002 +++ linux/mm/vmscan.c Wed Aug 7 17:46:42 2002 @@ -256,7 +256,7 @@ unsigned long end; /* Don't swap out areas which are locked down */ - if (vma->vm_flags & VM_LOCKED) + if (vma->vm_flags & (VM_LOCKED|VM_RESERVED)) return 0; pgdir = pgd_offset(tsk->mm, address); diff -urN linux.orig/net/core/sock.c linux/net/core/sock.c --- linux.orig/net/core/sock.c Fri Nov 2 17:39:16 2001 +++ linux/net/core/sock.c Wed Aug 7 17:45:23 2002 @@ -1025,7 +1025,7 @@ { if (!sk->dead) { wake_up_interruptible(sk->sleep); - sock_wake_async(sk->socket,0); + sock_wake_async(sk->socket,0,POLL_ERR); } } @@ -1033,7 +1033,7 @@ { if(!sk->dead) { wake_up_interruptible(sk->sleep); - sock_wake_async(sk->socket,1); + sock_wake_async(sk->socket,1,POLL_IN); } } @@ -1048,7 +1048,7 @@ /* Should agree with poll, otherwise some programs break */ if (sock_writeable(sk)) - sock_wake_async(sk->socket, 2); + sock_wake_async(sk->socket, 2, POLL_OUT); } } diff -urN linux.orig/net/ipv4/tcp.c linux/net/ipv4/tcp.c --- linux.orig/net/ipv4/tcp.c Fri Nov 2 17:39:16 2001 +++ linux/net/ipv4/tcp.c Wed Aug 7 17:45:23 2002 @@ -615,7 +615,7 @@ wake_up_interruptible(sk->sleep); if (sock_wspace(sk) >= tcp_min_write_space(sk)) - sock_wake_async(sk->socket, 2); + sock_wake_async(sk->socket, 2, POLL_OUT); } diff -urN linux.orig/net/ipv4/tcp_input.c linux/net/ipv4/tcp_input.c --- linux.orig/net/ipv4/tcp_input.c Fri Nov 2 17:39:16 2001 +++ linux/net/ipv4/tcp_input.c Wed Aug 7 17:58:04 2002 @@ -97,6 +97,7 @@ */ static void tcp_delack_estimator(struct tcp_opt *tp) { + tcp_exit_quickack_mode(tp); if(tp->ato == 0) { tp->lrcvtime = tcp_time_stamp; @@ -115,10 +116,7 @@ if(m > tp->rto) tp->ato = tp->rto; else { - /* This funny shift makes sure we - * clear the "quick ack mode" bit. 
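The mm/vmscan.c hunk above makes swap_out skip any vma carrying the new VM_RESERVED flag, which is the mechanism the wrapper.h comment earlier points at: mark the whole mapping reserved once instead of calling mem_map_reserve() on every page. A tiny model of the guard, with the flag values taken from the mm.h hunk and everything else illustrative:

#include <stdio.h>

#define VM_LOCKED   0x2000
#define VM_IO       0x4000
#define VM_RESERVED 0x8000	/* don't unmap it from swap_out */

/* swap_out_vma()-style guard: locked and reserved areas are never swapped */
static int swappable(unsigned int vm_flags)
{
	return !(vm_flags & (VM_LOCKED | VM_RESERVED));
}

int main(void)
{
	/* a driver's mmap() handler would do: vma->vm_flags |= VM_RESERVED; */
	printf("%d\n", swappable(VM_IO | VM_RESERVED));	/* 0: skipped  */
	printf("%d\n", swappable(0));			/* 1: eligible */
	return 0;
}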
- */ - tp->ato = ((tp->ato << 1) >> 2) + m; + tp->ato = (tp->ato >> 1) + m; } } } @@ -1183,7 +1181,7 @@ if (!sk->dead) { sk->state_change(sk); - sock_wake_async(sk->socket, 1); + sock_wake_async(sk->socket, 1, POLL_HUP); } } @@ -1697,6 +1695,7 @@ kill_proc(sk->proc, SIGURG, 1); else kill_pg(-sk->proc, SIGURG, 1); + sock_wake_async(sk->socket, 3, POLL_PRI); } /* We may be adding urgent data when the last byte read was @@ -1933,19 +1932,23 @@ tcp_send_ack(sk); goto discard; } - - if(th->syn && TCP_SKB_CB(skb)->seq != tp->syn_seq) { - SOCK_DEBUG(sk, "syn in established state\n"); - tcp_statistics.TcpInErrs++; - tcp_reset(sk); - return 1; - } if(th->rst) { tcp_reset(sk); goto discard; } + if(th->syn) { + if (TCP_SKB_CB(skb)->seq != tp->syn_seq || + skb->len > th->doff*4) { + tcp_statistics.TcpInErrs++; + tcp_reset(sk); + return 1; + } + tcp_send_ack(sk); + goto discard; + } + if(th->ack) tcp_ack(sk, th, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->ack_seq, len); @@ -2244,7 +2247,7 @@ if(!sk->dead) { sk->state_change(sk); - sock_wake_async(sk->socket, 0); + sock_wake_async(sk->socket, 0, POLL_OUT); } } else { if(th->syn && !th->rst) { diff -urN linux.orig/net/ipv4/tcp_ipv4.c linux/net/ipv4/tcp_ipv4.c --- linux.orig/net/ipv4/tcp_ipv4.c Sun Mar 25 18:37:41 2001 +++ linux/net/ipv4/tcp_ipv4.c Wed Aug 7 17:49:21 2002 @@ -1404,6 +1404,7 @@ newtp->snd_una = req->snt_isn + 1; newtp->srtt = 0; newtp->ato = 0; + tcp_enter_quickack_mode(newtp); newtp->snd_wl1 = req->rcv_isn; newtp->snd_wl2 = req->snt_isn; @@ -1958,6 +1959,7 @@ skb_queue_head_init(&tp->out_of_order_queue); tcp_init_xmit_timers(sk); + tcp_enter_quickack_mode(tp); tp->rto = TCP_TIMEOUT_INIT; /*TCP_WRITE_TIME*/ tp->mdev = TCP_TIMEOUT_INIT; tp->mss_clamp = ~0; diff -urN linux.orig/net/ipv4/tcp_output.c linux/net/ipv4/tcp_output.c --- linux.orig/net/ipv4/tcp_output.c Fri Nov 2 17:39:16 2001 +++ linux/net/ipv4/tcp_output.c Wed Aug 7 17:49:21 2002 @@ -1044,6 +1044,17 @@ timeout = (tp->ato << 1) >> 1; if (timeout > max_timeout) timeout = max_timeout; + if (!timeout) + { + timeout = tp->rto; + if ((signed) timeout <= 0) + { + printk(KERN_ERR + "tcp_send_delayed_ack: rto %ld!\n", timeout); + timeout = 1; + } + timeout = min(timeout, max_timeout); + } timeout += jiffies; /* Use new timeout only if there wasn't a older one earlier. 
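The tcp_output.c hunk above keeps the delayed-ACK timer from being armed with a zero timeout when no ACK-delay estimate exists yet: it falls back to the retransmit timeout, clamped to the usual maximum and floored at one tick. A standalone model of that clamping, using illustrative jiffies-style values:

#include <stdio.h>

/* mirrors tcp_send_delayed_ack(): derive a timer value from ato, and if it
 * comes out as 0 (no ACK-delay estimate yet), fall back to a sane rto */
static long delack_timeout(long ato, long rto, long max_timeout)
{
	long timeout = (ato << 1) >> 1;

	if (timeout > max_timeout)
		timeout = max_timeout;
	if (!timeout) {
		timeout = rto;
		if (timeout <= 0)	/* defensive: rto should never be <= 0 */
			timeout = 1;
		if (timeout > max_timeout)
			timeout = max_timeout;
	}
	return timeout;		/* caller adds jiffies and arms the timer */
}

int main(void)
{
	printf("%ld\n", delack_timeout(0, 300, 50));	/* 50: rto, clamped to max */
	printf("%ld\n", delack_timeout(12, 300, 50));	/* 12: ato-derived value   */
	return 0;
}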
*/ diff -urN linux.orig/net/ipv4/tcp_timer.c linux/net/ipv4/tcp_timer.c --- linux.orig/net/ipv4/tcp_timer.c Sun Mar 25 18:31:12 2001 +++ linux/net/ipv4/tcp_timer.c Wed Aug 7 17:49:21 2002 @@ -195,7 +195,21 @@ if (!atomic_read(&sk->sock_readers)) tcp_send_ack(sk); else - tcp_send_delayed_ack(&(sk->tp_pinfo.af_tcp), HZ/10); + { + struct tcp_opt * tp = &(sk->tp_pinfo.af_tcp); + int rto; + + rto = tp->rto; + if (rto <= 0) + { + printk(KERN_ERR + "tcp_delack_timer: rto %d!\n", rto); + rto = 1; + } + rto = min(rto, HZ/10); + tp->delack_timer.expires = rto + jiffies; + add_timer(&tp->delack_timer); + } } } diff -urN linux.orig/net/ipv6/ip6_output.c linux/net/ipv6/ip6_output.c --- linux.orig/net/ipv6/ip6_output.c Sun Mar 25 18:31:13 2001 +++ linux/net/ipv6/ip6_output.c Wed Aug 7 17:51:14 2002 @@ -627,7 +627,7 @@ struct ipv6hdr *hdr = skb->nh.ipv6h; struct inet6_skb_parm *opt =(struct inet6_skb_parm*)skb->cb; - if (ipv6_devconf.forwarding == 0 && opt->srcrt == 0) + if (ipv6_devconf.forwarding == 0) goto drop; /* diff -urN linux.orig/net/socket.c linux/net/socket.c --- linux.orig/net/socket.c Sun Mar 25 18:37:41 2001 +++ linux/net/socket.c Wed Aug 7 17:45:23 2002 @@ -546,7 +546,7 @@ return 0; } -int sock_wake_async(struct socket *sock, int how) +int sock_wake_async(struct socket *sock, int how, int band) { if (!sock || !sock->fasync_list) return -1; @@ -563,8 +563,10 @@ /* fall through */ case 0: call_kill: - kill_fasync(sock->fasync_list, SIGIO); + kill_fasync(sock->fasync_list, SIGIO, band); break; + case 3: + kill_fasync(sock->fasync_list, SIGURG, band); } return 0; } diff -urN linux.orig/net/unix/af_unix.c linux/net/unix/af_unix.c --- linux.orig/net/unix/af_unix.c Tue May 21 01:32:35 2002 +++ linux/net/unix/af_unix.c Wed Aug 7 17:47:52 2002 @@ -1017,9 +1017,6 @@ } if (!other) { - err = -ECONNRESET; - if(sunaddr==NULL) - goto out_free; other = unix_find_other(sunaddr, namelen, sk->type, hash, &err); if (other==NULL) goto out_free;
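The net/socket.c hunk above, together with the sock_wake_async() callers changed throughout this patch, threads a poll band through to kill_fasync() and adds a fourth wakeup kind that delivers SIGURG for urgent data. A compact model of the dispatch; it glosses over the per-socket flag checks that cases 1 and 2 perform, and the band numbers in the comments are the usual POLL_* si_code values:

#include <stdio.h>
#include <signal.h>

/* models the switch in sock_wake_async(): which signal (and poll band)
 * is handed to kill_fasync() for each kind of wakeup */
static void wake_async_model(int how, int band)
{
	int sig;

	switch (how) {
	case 0:			/* state change / error           */
	case 1:			/* data arrived (only if waiting) */
	case 2:			/* write space became available   */
		sig = SIGIO;
		break;
	case 3:			/* urgent data                    */
		sig = SIGURG;
		break;
	default:
		return;
	}
	printf("kill_fasync(sig=%d, band=%d)\n", sig, band);
}

int main(void)
{
	wake_async_model(2, 2 /* POLL_OUT */);	/* write space, as in tcp.c        */
	wake_async_model(3, 5 /* POLL_PRI */);	/* urgent data, as in tcp_input.c  */
	return 0;
}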