X11-driver-firegl-kernel-2_6_11.patch

Port the ATI fglrx kernel glue (X11-driver-firegl 8.10.19) to Linux >= 2.6.11:
- agpgart_be.c: map firegl_pci_find_class() to pci_get_class() (the pci_find_class()
  API was removed), and balance the reference counts it takes with pci_dev_put().
- firegl_public.c: walk the new 4-level page tables (pgd -> pud -> pmd -> pte) via
  pud_offset(), and replace the removed remap_page_range() with remap_pfn_range()
  (which takes a page frame number, hence the >> PAGE_SHIFT on the offset).
--- X11-driver-firegl-8.10.19/lib/modules/fglrx/build_mod/agpgart_be.c.orig     2005-01-31 19:50:00.000000000 +0200
+++ X11-driver-firegl-8.10.19/lib/modules/fglrx/build_mod/agpgart_be.c  2005-03-02 22:19:25.000000000 +0200
@@ -261,6 +261,12 @@
 #define firegl_pci_find_class(class,from) pci_find_class(class,from)
 #endif
 
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,11) 
+#define firegl_pci_find_class(class,from) pci_get_class(class,from)
+#else
+#define firegl_pci_find_class(class,from) pci_find_class(class,from)
+#endif
+
 int agp_backend_acquire(void)
 {
        if (agp_bridge.type == NOT_SUPPORTED) {
@@ -4075,6 +4081,13 @@
                        }
                }
        }
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,11) 
+            // the reference count has been increased in agp_backend_initialize.
+        if (device)
+            pci_dev_put(device); 
+#endif
+
        /*
         * PASS3: Figure out the 8X/4X setting and enable the
         *        target (our motherboard chipset).
@@ -5283,6 +5296,12 @@
             pci_dev_put(device); 
 #endif
 
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,11) 
+            // the reference count has been increased in agp_backend_initialize.
+        if (device)
+            pci_dev_put(device); 
+#endif
+
     return(0); /* success */
 }
 
@@ -7428,6 +7447,11 @@
                return rc;
        }
 
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,11)
+        // decrease the reference count.
+        pci_dev_put(agp_bridge.dev);
+#endif
+
        if (agp_bridge.needs_scratch_page == TRUE) {
                agp_bridge.scratch_page = agp_bridge.agp_alloc_page();
 
--- X11-driver-firegl-8.10.19/lib/modules/fglrx/build_mod/firegl_public.c.orig  2005-02-10 04:15:00.000000000 +0200
+++ X11-driver-firegl-8.10.19/lib/modules/fglrx/build_mod/firegl_public.c       2005-03-02 22:21:42.000000000 +0200
@@ -1664,13 +1664,22 @@
 {
     unsigned long pte_linear;
     pgd_t* pgd_p;
+#if LINUX_VERSION_CODE > 0x02060a     /* KERNEL_VERSION(2,6,11) */
+    pud_t* pud_p;
+#endif
     pmd_t* pmd_p;
     pte_t* pte_p;
     pte_t  pte;
 
     pte_linear = VMALLOC_VMADDR(virtual_addr);  // convert to pte linear address (x86 => nop)
     pgd_p = pgd_offset(mm, pte_linear);
+#if LINUX_VERSION_CODE > 0x02060a     /* KERNEL_VERSION(2,6,11) */
+    pud_p = pud_offset(pgd_p, pte_linear);
+    pmd_p = pmd_offset(pud_p, pte_linear);
+#else
     pmd_p = pmd_offset(pgd_p, pte_linear);
+#endif
+
 #ifndef FGL_ATOMIC_PTE
 #if LINUX_VERSION_CODE > 0x020500
     pte_p = pte_offset_kernel(pmd_p, pte_linear);
@@ -2100,6 +2109,9 @@
                                                    unsigned long address)
 {
     pgd_t* pgd_p;
+#if LINUX_VERSION_CODE > 0x02060a     /* KERNEL_VERSION(2,6,11) */
+    pud_t* pud_p;
+#endif
     pmd_t* pmd_p;
     pte_t* pte_p;
     pte_t  pte;
@@ -2200,7 +2212,12 @@
         /* alternatively we could generate a NOPAGE_OOM "out of memory" */
     }
     /*  locate medium level page table (x86 => nop) */
+#if LINUX_VERSION_CODE > 0x02060a     /* KERNEL_VERSION(2,6,11) */
+    pud_p = pud_offset(pgd_p, pte_linear);
+    pmd_p = pmd_offset(pud_p, pte_linear);
+#else
     pmd_p = pmd_offset(pgd_p, pte_linear);
+#endif
     if (!pmd_present(*pmd_p))
     {
         __KE_ERROR("FATAL ERROR: User queue buffer not present! (pmd)\n");
@@ -2564,13 +2581,21 @@
 {
     unsigned long pte_linear;
     pgd_t* pgd_p;
+#if LINUX_VERSION_CODE > 0x02060a     /* KERNEL_VERSION(2,6,11) */
+    pud_t* pud_p;
+#endif
     pmd_t* pmd_p;
     pte_t* pte_p;
     pte_t  pte;
 
     pte_linear = VMALLOC_VMADDR(virtual_addr);  // convert to pte linear address (x86 => nop)
     pgd_p = pgd_offset(vma->vm_mm, pte_linear);
+#if LINUX_VERSION_CODE > 0x02060a     /* KERNEL_VERSION(2,6,11) */
+    pud_p = pud_offset(pgd_p, pte_linear);
+    pmd_p = pmd_offset(pud_p, pte_linear);
+#else
     pmd_p = pmd_offset(pgd_p, pte_linear);
+#endif
 #ifndef FGL_ATOMIC_PTE
 #if LINUX_VERSION_CODE > 0x020500
     pte_p = pte_offset_kernel(pmd_p, pte_linear);
@@ -2719,13 +2744,13 @@
 #endif /* __ia64__ */
                 vma->vm_flags |= VM_IO; /* not in core dump */
             }
-            if (remap_page_range(FGL_VMA_API_PASS
+            if (remap_pfn_range(FGL_VMA_API_PASS
                                  vma->vm_start,
-                                 __ke_vm_offset(vma),
+                                 __ke_vm_offset(vma) >> PAGE_SHIFT,
                                  vma->vm_end - vma->vm_start,
                                  vma->vm_page_prot))
             {
-                __KE_DEBUG("remap_page_range failed\n");
+                __KE_DEBUG("remap_pfn_range failed\n");
                 return -EAGAIN;
             }
             vma->vm_flags |= VM_SHM | VM_RESERVED; /* Don't swap */
@@ -2786,13 +2811,13 @@
                        {
                                if (__ke_vm_offset(vma) >= __pa(high_memory))
                                        vma->vm_flags |= VM_IO; /* not in core dump */
-                               if (remap_page_range(FGL_VMA_API_PASS
+                               if (remap_pfn_range(FGL_VMA_API_PASS
                                                                         vma->vm_start,
-                                                                        __ke_vm_offset(vma),
+                                                                        __ke_vm_offset(vma) >> PAGE_SHIFT,
                                                                         vma->vm_end - vma->vm_start,
                                                                         vma->vm_page_prot))
                                {
-                                       __KE_DEBUG("remap_page_range failed\n");
+                                       __KE_DEBUG("remap_pfn_range failed\n");
                                        return -EAGAIN;
                                }
 #ifdef __x86_64__
@@ -2823,13 +2848,13 @@
                        {
                                if (__ke_vm_offset(vma) >= __pa(high_memory))
                                        vma->vm_flags |= VM_IO; /* not in core dump */
-                               if (remap_page_range(FGL_VMA_API_PASS
+                               if (remap_pfn_range(FGL_VMA_API_PASS
                                                                         vma->vm_start,
-                                                                        __ke_vm_offset(vma),
+                                                                        __ke_vm_offset(vma) >> PAGE_SHIFT,
                                                                         vma->vm_end - vma->vm_start,
                                                                         vma->vm_page_prot))
                                {
-                                       __KE_DEBUG("remap_page_range failed\n");
+                                       __KE_DEBUG("remap_pfn_range failed\n");
                                        return -EAGAIN;
                                }
 #ifdef __x86_64__
@@ -2873,6 +2898,37 @@
 
 #if LINUX_VERSION_CODE >= 0x020400
 
+#if LINUX_VERSION_CODE >= 0x02060b
+
+typedef struct {
+       void                    (*free_memory)(struct agp_memory *);
+       struct agp_memory *     (*allocate_memory)(size_t, u32);
+       int                     (*bind_memory)(struct agp_memory *, off_t);
+       int                     (*unbind_memory)(struct agp_memory *);
+       void                    (*enable)(u32);
+       int                     (*acquire)(void);
+       void                    (*release)(void);
+       int                     (*copy_info)(struct agp_kern_info *);
+} drm_agp_t;
+
+static const drm_agp_t drm_agp = {
+       &agp_free_memory,
+       &agp_allocate_memory,
+       &agp_bind_memory,
+       &agp_unbind_memory,
+       &agp_enable,
+       &agp_backend_acquire,
+       &agp_backend_release,
+       &agp_copy_info
+};
+#undef DRM_AGP_MODULE_GET
+#undef DRM_AGP_MODULE_PUT
+
+#define DRM_AGP_MODULE_GET      &drm_agp
+#define DRM_AGP_MODULE_PUT 
+
+#endif
+
 static const drm_agp_t  *drm_agp_module_stub = NULL;
 
 #define AGP_FUNCTIONS          8