1 diff -ru usr/src/nv/nv-vm.c usr/src/nv.1386866/nv-vm.c
2 --- usr/src/nv/nv-vm.c 2005-03-22 16:19:09.000000000 +0100
3 +++ usr/src/nv.1386866/nv-vm.c 2005-10-11 19:33:24.821108608 +0200
5 * conflicts. we try to rely on the kernel's provided interfaces when possible,
6 * but need additional flushing on earlier kernels.
8 -#if defined(KERNEL_2_4)
9 -/* wrap CACHE_FLUSH so we can pass it to smp_call_function */
10 -static void cache_flush(void *p)
17 * 2.4 kernels handle flushing in the change_page_attr() call, but kernels
18 * earlier than 2.4.27 don't flush on cpus that support Self Snoop, so we
19 * manually flush on these kernels (actually, we go ahead and flush on all
20 * 2.4 kernels, as it's possible some others may be missing this fix and
21 * we'd prefer to be a little slower flushing caches than hanging the
22 - * system. 2.6 kernels split the flushing out to a seperate call,
23 - * global_flush_tlb(), so we rely on that.
25 + * 2.6 kernels split the flushing out to a separate call,
26 + * global_flush_tlb(), so we rely on that. however, there are some 2.6
27 + * x86_64 kernels that do not properly flush. for now, we'll flush on all
28 + * potential kernels, as it's slightly slower, but safer.
30 +#if defined(KERNEL_2_4) || (defined(KERNEL_2_6) && defined(NVCPU_X86_64))
31 +#define NV_CPA_NEEDS_FLUSHING 1
34 +#if defined(NV_CPA_NEEDS_FLUSHING)
35 +static void cache_flush(void *p)
37 + unsigned long reg0, reg1;
41 + // flush global TLBs
42 +#if defined (NVCPU_X86)
43 + asm volatile("movl %%cr4, %0; \n"
44 + "andl $~0x80, %0; \n"
45 + "movl %0, %%cr4; \n"
46 + "movl %%cr3, %1; \n"
47 + "movl %1, %%cr3; \n"
49 + "movl %0, %%cr4; \n"
50 + : "=&r" (reg0), "=&r" (reg1)
53 + asm volatile("movq %%cr4, %0; \n"
54 + "andq $~0x80, %0; \n"
55 + "movq %0, %%cr4; \n"
56 + "movq %%cr3, %1; \n"
57 + "movq %1, %%cr3; \n"
59 + "movq %0, %%cr4; \n"
60 + : "=&r" (reg0), "=&r" (reg1)
66 static void nv_flush_caches(void)
68 -#if defined(KERNEL_2_4)
69 +#if defined(NV_CPA_NEEDS_FLUSHING)
71 smp_call_function(cache_flush, NULL, 1, 1);