commit ad984aeb

diff -ru usr/src/nv/nv-vm.c usr/src/nv.1386866/nv-vm.c
--- usr/src/nv/nv-vm.c	2005-03-22 16:19:09.000000000 +0100
+++ usr/src/nv.1386866/nv-vm.c	2005-10-11 19:33:24.821108608 +0200
@@ -58,26 +58,57 @@
  * conflicts. we try to rely on the kernel's provided interfaces when possible,
  * but need additional flushing on earlier kernels.
  */
-#if defined(KERNEL_2_4)
-/* wrap CACHE_FLUSH so we can pass it to smp_call_function */
-static void cache_flush(void *p)
-{
-    CACHE_FLUSH();
-}
-#endif
-
 /*
  * 2.4 kernels handle flushing in the change_page_attr() call, but kernels
  * earlier than 2.4.27 don't flush on cpus that support Self Snoop, so we
  * manually flush on these kernels (actually, we go ahead and flush on all
  * 2.4 kernels, as it's possible some others may be missing this fix and
  * we'd prefer to be a little slower flushing caches than hanging the
- * system. 2.6 kernels split the flushing out to a seperate call,
- * global_flush_tlb(), so we rely on that.
+ * system.
+ * 2.6 kernels split the flushing out to a seperate call,
+ * global_flush_tlb(), so we rely on that. however, there are some 2.6
+ * x86_64 kernels that do not properly flush. for now, we'll flush on all
+ * potential kernels, as it's slightly slower, but safer.
  */
+#if defined(KERNEL_2_4) || (defined(KERNEL_2_6) && defined(NVCPU_X86_64))
+#define NV_CPA_NEEDS_FLUSHING 1
+#endif
+
+#if defined(NV_CPA_NEEDS_FLUSHING)
+static void cache_flush(void *p)
+{
+    unsigned long reg0, reg1;
+
+    CACHE_FLUSH();
+
+    // flush global TLBs
+#if defined (NVCPU_X86)
+    asm volatile("movl %%cr4, %0;  \n"
+                 "andl $~0x80, %0; \n"
+                 "movl %0, %%cr4;  \n"
+                 "movl %%cr3, %1;  \n"
+                 "movl %1, %%cr3;  \n"
+                 "orl  $0x80, %0;  \n"
+                 "movl %0, %%cr4;  \n"
+                 : "=&r" (reg0), "=&r" (reg1)
+                 : : "memory");
+#else
+    asm volatile("movq %%cr4, %0;  \n"
+                 "andq $~0x80, %0; \n"
+                 "movq %0, %%cr4;  \n"
+                 "movq %%cr3, %1;  \n"
+                 "movq %1, %%cr3;  \n"
+                 "orq  $0x80, %0;  \n"
+                 "movq %0, %%cr4;  \n"
+                 : "=&r" (reg0), "=&r" (reg1)
+                 : : "memory");
+#endif
+}
+#endif
+
 static void nv_flush_caches(void)
 {
-#if defined(KERNEL_2_4)
+#if defined(NV_CPA_NEEDS_FLUSHING)
 #ifdef CONFIG_SMP
     smp_call_function(cache_flush, NULL, 1, 1);
 #endif