linux-2.4.0-raid5xor.patch

--- linux/include/asm-i386/xor.h-o      Sat Dec 23 08:13:50 2000
+++ linux/include/asm-i386/xor.h        Mon Jan  8 16:33:26 2001
@@ -13,6 +13,8 @@
  * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
+#include <linux/config.h>
+
 /*
  * High-speed RAID5 checksumming functions utilizing MMX instructions.
  * Copyright (C) 1998 Ingo Molnar.
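The point of this first hunk is that <linux/config.h> pulls in the generated
CONFIG_* definitions from the kernel .config; without it, the FXSR test added
further down would quietly evaluate to false and the SSE path would never be
compiled in. A minimal illustration of that dependency (a sketch, not part of
the patch):

    /* <linux/config.h> makes the CONFIG_* symbols visible in this header. */
    #include <linux/config.h>

    #if defined(CONFIG_X86_FXSR) || defined(CONFIG_X86_RUNTIME_FXSR)
    /* SSE templates are safe to offer: the kernel will set CR4.OSFXSR. */
    #else
    /* Without the include above, this branch would always be taken. */
    #endif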
@@ -525,6 +527,8 @@
 #undef FPU_SAVE
 #undef FPU_RESTORE
 
+#if defined(CONFIG_X86_FXSR) || defined(CONFIG_X86_RUNTIME_FXSR)
+
 /*
  * Cache avoiding checksumming functions utilizing KNI instructions
  * Copyright (C) 1999 Zach Brown (with obvious credit due Ingo)
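This hunk opens an #if guard (closed in the next hunk) around the SSE ("KNI")
template code: SSE instructions fault with #GP unless the kernel has enabled
FXSAVE/FXRSTOR state handling and set the OSFXSR bit in CR4, which a 2.4
kernel only does when FXSR support is configured. What is being guarded are
xor_block_template descriptors; their rough shape, paraphrased from the
2.4-era include/linux/raid/xor.h (field details from memory, not quoted from
this tree):

    struct xor_block_template {
            struct xor_block_template *next;  /* linked in by xor_speed() */
            char *name;                       /* e.g. "pIII_sse" */
            int speed;                        /* filled in by calibration */
            void (*do_2)(unsigned long, unsigned long *, unsigned long *);
            void (*do_3)(unsigned long, unsigned long *, unsigned long *,
                         unsigned long *);
            void (*do_4)(unsigned long, unsigned long *, unsigned long *,
                         unsigned long *, unsigned long *);
            void (*do_5)(unsigned long, unsigned long *, unsigned long *,
                         unsigned long *, unsigned long *, unsigned long *);
    };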
@@ -835,6 +839,26 @@
         do_5: xor_sse_5,
 };
 
+#define XOR_SSE2 \
+               if (cpu_has_xmm)                        \
+                       xor_speed(&xor_block_pIII_sse);
+
+
+/* We force the use of the SSE xor block because it can write around L2.
+   We may also be able to load into the L1 only depending on how the cpu
+   deals with a load to a line that is being prefetched.  */
+#define XOR_SELECT_TEMPLATE(FASTEST) \
+       (cpu_has_xmm ? &xor_block_pIII_sse : FASTEST)
+
+#else
+
+/* Don't try any SSE2 when FXSR is not enabled, because OSFXSR will not be set
+   -AK */
+#define XOR_SSE2
+#define XOR_SELECT_TEMPLATE(FASTEST) (FASTEST)
+
+#endif
+
 /* Also try the generic routines.  */
 #include <asm-generic/xor.h>
 
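Note that the #if opened in the previous hunk closes here, and that XOR_SSE2
is a slight misnomer: it gates the PIII SSE templates, not SSE2. Condensed,
the conditional structure after both hunks apply is (a paraphrase of the
patch itself, with the unchanged template code elided):

    #if defined(CONFIG_X86_FXSR) || defined(CONFIG_X86_RUNTIME_FXSR)
    /* ... xor_sse_2 .. xor_sse_5 and xor_block_pIII_sse ... */
    #define XOR_SSE2 \
                    if (cpu_has_xmm)                        \
                            xor_speed(&xor_block_pIII_sse);
    #define XOR_SELECT_TEMPLATE(FASTEST) \
            (cpu_has_xmm ? &xor_block_pIII_sse : FASTEST)
    #else
    #define XOR_SSE2
    #define XOR_SELECT_TEMPLATE(FASTEST) (FASTEST)
    #endif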
@@ -843,16 +867,9 @@
        do {                                            \
                xor_speed(&xor_block_8regs);            \
                xor_speed(&xor_block_32regs);           \
-               if (cpu_has_xmm)                        \
-                       xor_speed(&xor_block_pIII_sse); \
+               XOR_SSE2        \
                if (md_cpu_has_mmx()) {                 \
                        xor_speed(&xor_block_pII_mmx);  \
                        xor_speed(&xor_block_p5_mmx);   \
                }                                       \
        } while (0)
-
-/* We force the use of the SSE xor block because it can write around L2.
-   We may also be able to load into the L1 only depending on how the cpu
-   deals with a load to a line that is being prefetched.  */
-#define XOR_SELECT_TEMPLATE(FASTEST) \
-       (cpu_has_xmm ? &xor_block_pIII_sse : FASTEST)
-
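For context, both macros are consumed by the RAID5 calibration code in
drivers/md/xor.c. The sketch below paraphrases the 2.4-era logic from memory;
the names calibrate_xor_block, template_list, and active_template are as I
recall them, not quoted from this tree:

    /* Hedged sketch of the consumer in drivers/md/xor.c (2.4 era). */
    static struct xor_block_template *template_list;  /* built by xor_speed() */
    struct xor_block_template *active_template;

    void calibrate_xor_block(void)
    {
            struct xor_block_template *f, *fastest;

            /* Benchmark every candidate: each xor_speed() call times the
             * template and links it into template_list with ->speed set. */
            XOR_TRY_TEMPLATES;

            fastest = template_list;
            for (f = fastest; f; f = f->next)
                    if (f->speed > fastest->speed)
                            fastest = f;

    #ifdef XOR_SELECT_TEMPLATE
            /* Override hook: after this patch it always exists on i386 and
             * forces the SSE block whenever cpu_has_xmm, since the raw
             * benchmark cannot see the benefit of writing around L2. */
            fastest = XOR_SELECT_TEMPLATE(fastest);
    #endif

            active_template = fastest;
    }

The design point the patch preserves: benchmarking picks the fastest template
by measured throughput, but XOR_SELECT_TEMPLATE gets the last word, because
the raw speed number does not capture the cache-bypassing behaviour of the
SSE block.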