NVIDIA_kernel-1.0-6629-1161283.diff

Note: used up to driver version 1.0.8178; seems to be stable on i686 with kernel 2.6.14.5-1.
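This patch reworks how the NVIDIA 1.0-6629 kernel module tracks the pages
behind an allocation. Instead of one contiguous array of nv_pte_t (plus a
separate scatterlist array), nv_alloc_t now holds an array of pointers to
nv_pte_t entries allocated from a dedicated slab cache, with each entry
carrying its own struct scatterlist. nv_vm_malloc_pages() and
nv_vm_free_pages() gain an nv_state_t parameter so the pci_dev is looked up
per device instead of being cached in nv_alloc_t.

The NV_KMEM_CACHE_* macros introduced in nv-linux.h wrap the 2.6.x-era slab
API. A minimal sketch of the lifecycle they encapsulate (standalone example,
not part of the patch; the example_* names are hypothetical and error
handling is trimmed):

    #include <linux/slab.h>

    static kmem_cache_t *example_cache;   /* kmem_cache_t was the 2.6.x type name */

    static int example_init(void)
    {
        /* 2.6.x signature: name, object size, align, flags, ctor, dtor */
        example_cache = kmem_cache_create("example", sizeof(nv_pte_t),
                                          0, 0, NULL, NULL);
        return (example_cache != NULL) ? 0 : -ENOMEM;
    }

    static void example_exit(void)
    {
        /* allocate one object, release it, then destroy the cache */
        nv_pte_t *entry = kmem_cache_alloc(example_cache, GFP_KERNEL);
        if (entry != NULL)
            kmem_cache_free(example_cache, entry);
        kmem_cache_destroy(example_cache);
    }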
diff -ru usr/src/nv/nv-linux.h usr/src/nv.1161283/nv-linux.h
--- usr/src/nv/nv-linux.h	2004-11-03 22:53:00.000000000 +0100
+++ usr/src/nv.1161283/nv-linux.h	2004-11-16 22:56:41.000000000 +0100
@@ -429,6 +429,30 @@
         free_pages(ptr, order); \
     }
 
+#define NV_KMEM_CACHE_CREATE(kmem_cache, name, type) \
+    { \
+        kmem_cache = kmem_cache_create(name, sizeof(type), \
+                                       0, 0, NULL, NULL); \
+    }
+
+#define NV_KMEM_CACHE_DESTROY(kmem_cache) \
+    { \
+        kmem_cache_destroy(kmem_cache); \
+        kmem_cache = NULL; \
+    }
+
+#define NV_KMEM_CACHE_ALLOC(ptr, kmem_cache, type) \
+    { \
+        (ptr) = kmem_cache_alloc(kmem_cache, GFP_KERNEL); \
+        KM_ALLOC_RECORD(ptr, sizeof(type), "km_cache_alloc"); \
+    }
+
+#define NV_KMEM_CACHE_FREE(ptr, type, kmem_cache) \
+    { \
+        KM_FREE_RECORD(ptr, sizeof(type), "km_cache_free"); \
+        kmem_cache_free(kmem_cache, ptr); \
+    }
+
 #endif /* !defined NVWATCH */
 
 
@@ -776,6 +800,9 @@
     unsigned long phys_addr;
     unsigned long virt_addr;
     dma_addr_t dma_addr;
+#ifdef NV_SG_MAP_BUFFERS
+    struct scatterlist sg_list;
+#endif
 #ifdef CONFIG_SWIOTLB
     unsigned long orig_phys_addr;
     unsigned long orig_virt_addr;
@@ -789,15 +816,11 @@
     unsigned int num_pages;
     unsigned int order;
     unsigned int size;
-    nv_pte_t *page_table;  /* list of physical pages allocated */
+    nv_pte_t **page_table; /* list of physical pages allocated */
     void *key_mapping;     /* mapping used as a key for finding this nv_alloc_t */
                            /* may be the same as page_table */
     unsigned int class;
     void *priv_data;
-#if defined(NV_SG_MAP_BUFFERS)
-    struct pci_dev *dev;
-    struct scatterlist *sg_list; /* list of physical pages allocated */
-#endif
 } nv_alloc_t;
 
 
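The net effect of the nv-linux.h changes, side by side (illustrative sketch
only; the *_before_t/*_after_t names are hypothetical and do not appear in
the patch):

    /* Before: flat entry array plus parallel scatterlist array. */
    typedef struct {
        nv_pte_t           *page_table;  /* access: at->page_table[i].phys_addr */
        struct pci_dev     *dev;         /* PCI device cached per allocation    */
        struct scatterlist *sg_list;     /* parallel array: at->sg_list[i]      */
    } nv_alloc_before_t;

    /* After: array of slab-allocated entries; scatterlist embedded. */
    typedef struct {
        nv_pte_t          **page_table;  /* access: at->page_table[i]->phys_addr;
                                          * entries come from nv_pte_t_cache and
                                          * each embeds its own sg_list          */
    } nv_alloc_after_t;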
diff -ru usr/src/nv/nv-vm.c usr/src/nv.1161283/nv-vm.c
--- usr/src/nv/nv-vm.c	2004-11-03 22:53:00.000000000 +0100
+++ usr/src/nv.1161283/nv-vm.c	2004-11-16 22:56:41.000000000 +0100
@@ -138,13 +138,18 @@
  */
 
 int nv_vm_malloc_pages(
-        nv_alloc_t *at
+    nv_state_t *nv,
+    nv_alloc_t *at
 )
 {
     /* point page_ptr at the start of the actual page list */
-    nv_pte_t *page_ptr = at->page_table;
+    nv_pte_t *page_ptr = *at->page_table;
     int i;
     unsigned long virt_addr = 0, phys_addr;
+#if defined(NV_SG_MAP_BUFFERS)
+    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
+    struct pci_dev *dev = nvl->dev;
+#endif
 
     nv_printf(NV_DBG_MEMINFO, "NVRM: VM: nv_vm_malloc_pages: %d pages\n",
         at->num_pages);
@@ -175,7 +180,7 @@
         // for amd 64-bit platforms, remap pages to make them 32-bit addressable
         // in this case, we need the final remapping to be contiguous, so we
         // have to do the whole mapping at once, instead of page by page
-        if (nv_sg_map_buffer(at->dev, &at->sg_list[0],
+        if (nv_sg_map_buffer(dev, &at->page_table[0]->sg_list,
                              (void *) virt_addr, at->num_pages))
         {
             nv_printf(NV_DBG_ERRORS,
@@ -224,7 +229,7 @@
         /* lock the page for dma purposes */
         SetPageReserved(NV_GET_PAGE_STRUCT(phys_addr));
 
-        page_ptr = &at->page_table[i];
+        page_ptr = at->page_table[i];
         page_ptr->phys_addr = phys_addr;
         page_ptr->virt_addr = virt_addr;
        page_ptr->dma_addr = page_ptr->phys_addr;
@@ -235,7 +240,7 @@
#if defined(NV_SG_MAP_BUFFERS)
         if (!NV_ALLOC_MAPPING_CONTIG(at->flags))
         {
-            if (nv_sg_map_buffer(at->dev, &at->sg_list[i],
+            if (nv_sg_map_buffer(dev, &at->page_table[i]->sg_list,
                                  __va(page_ptr->phys_addr), 1))
             {
                 nv_printf(NV_DBG_ERRORS,
@@ -243,7 +248,7 @@
                 goto failed;
             }
         }
-        nv_sg_load(&at->sg_list[i], page_ptr);
+        nv_sg_load(&at->page_table[i]->sg_list, page_ptr);
#endif
         virt_addr += PAGE_SIZE;
     }
@@ -258,7 +263,7 @@
 
     for (; i >= 0; i--)
     {
-        page_ptr = &at->page_table[i];
+        page_ptr = at->page_table[i];
 
         // if we failed when allocating this page, skip over it
         // but if we failed pci_map_sg, make sure to free this page
@@ -267,7 +272,7 @@
             NV_UNLOCK_PAGE(page_ptr);
#if defined(NV_SG_MAP_BUFFERS)
             if (!NV_ALLOC_MAPPING_CONTIG(at->flags))
-                nv_sg_unmap_buffer(at->dev, &at->sg_list[i], page_ptr);
+                nv_sg_unmap_buffer(dev, &at->page_table[i]->sg_list, page_ptr);
#endif
             if (!NV_ALLOC_MAPPING_CACHED(at->flags))
                 NV_SET_PAGE_ATTRIB_CACHED(page_ptr);
@@ -279,15 +284,15 @@
 
     if (NV_ALLOC_MAPPING_CONTIG(at->flags))
     {
-        page_ptr = at->page_table;
+        page_ptr = *at->page_table;
#if defined(NV_SG_MAP_BUFFERS)
-        nv_sg_unmap_buffer(at->dev, &at->sg_list[0], page_ptr);
+        nv_sg_unmap_buffer(dev, &at->page_table[0]->sg_list, page_ptr);
#endif
         NV_FREE_PAGES(page_ptr->virt_addr, at->order);
     }
     else if (NV_ALLOC_MAPPING_VMALLOC(at->flags))
     {
-        page_ptr = at->page_table;
+        page_ptr = *at->page_table;
         NV_VFREE((void *) page_ptr->virt_addr, at->size);
     }
 
@@ -296,7 +301,7 @@
 
 // unlock the pages we've locked down for dma purposes
 void nv_vm_unlock_pages(
-        nv_alloc_t *at
+    nv_alloc_t *at
 )
 {
     nv_pte_t *page_ptr;
@@ -315,17 +320,22 @@
 
     for (i = 0; i < at->num_pages; i++)
     {
-        page_ptr = &at->page_table[i];
+        page_ptr = at->page_table[i];
         NV_UNLOCK_PAGE(page_ptr);
     }
 }
 
 void nv_vm_free_pages(
-        nv_alloc_t *at
+    nv_state_t *nv,
+    nv_alloc_t *at
 )
 {
     nv_pte_t *page_ptr;
     int i;
+#if defined(NV_SG_MAP_BUFFERS)
+    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
+    struct pci_dev *dev = nvl->dev;
+#endif
 
     nv_printf(NV_DBG_MEMINFO, "NVRM: VM: nv_vm_free_pages: %d pages\n",
         at->num_pages);
@@ -339,10 +349,10 @@
 
     for (i = 0; i < at->num_pages; i++)
     {
-        page_ptr = &at->page_table[i];
+        page_ptr = at->page_table[i];
#if defined(NV_SG_MAP_BUFFERS)
         if (!NV_ALLOC_MAPPING_CONTIG(at->flags))
-            nv_sg_unmap_buffer(at->dev, &at->sg_list[i], page_ptr);
+            nv_sg_unmap_buffer(dev, &at->page_table[i]->sg_list, page_ptr);
#endif
         if (!NV_ALLOC_MAPPING_CACHED(at->flags))
             NV_SET_PAGE_ATTRIB_CACHED(page_ptr);
@@ -353,15 +363,15 @@
 
     if (NV_ALLOC_MAPPING_CONTIG(at->flags))
     {
-        page_ptr = at->page_table;
+        page_ptr = *at->page_table;
#if defined(NV_SG_MAP_BUFFERS)
-        nv_sg_unmap_buffer(at->dev, &at->sg_list[0], page_ptr);
+        nv_sg_unmap_buffer(dev, &at->page_table[0]->sg_list, page_ptr);
#endif
         NV_FREE_PAGES(page_ptr->virt_addr, at->order);
     }
     else if (NV_ALLOC_MAPPING_VMALLOC(at->flags))
     {
-        page_ptr = at->page_table;
+        page_ptr = *at->page_table;
         NV_VFREE((void *) page_ptr->virt_addr, at->size);
     }
 }
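With the extra nv_state_t parameter, nv-vm.c now derives the PCI device
itself via NV_GET_NVL_FROM_NV_STATE() instead of reading the copy formerly
cached in nv_alloc_t. The per-page mapping flow then becomes (condensed from
the hunks above; error handling and the contiguous-mapping case omitted):

    page_ptr = at->page_table[i];                   /* slab-allocated entry */
    if (!NV_ALLOC_MAPPING_CONTIG(at->flags))
        nv_sg_map_buffer(dev, &at->page_table[i]->sg_list,
                         __va(page_ptr->phys_addr), 1);
    nv_sg_load(&at->page_table[i]->sg_list, page_ptr);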
diff -ru usr/src/nv/nv-vm.h usr/src/nv.1161283/nv-vm.h
--- usr/src/nv/nv-vm.h	2004-11-03 22:53:00.000000000 +0100
+++ usr/src/nv.1161283/nv-vm.h	2004-11-16 22:56:41.000000000 +0100
@@ -11,9 +11,9 @@
 #ifndef _NV_VM_H_
 #define _NV_VM_H_
 
-int nv_vm_malloc_pages(nv_alloc_t *);
+int nv_vm_malloc_pages(nv_state_t *, nv_alloc_t *);
 void nv_vm_unlock_pages(nv_alloc_t *);
-void nv_vm_free_pages(nv_alloc_t *);
+void nv_vm_free_pages(nv_state_t *, nv_alloc_t *);
 
 #if defined(NV_DBG_MEM)
 void nv_vm_list_page_count(nv_pte_t *, unsigned long);
@@ -21,11 +21,12 @@
 #define nv_vm_list_page_count(page_ptr, num_pages)
 #endif
 
-#define nv_vm_unlock_and_free_pages(at_count, at) \
-    if (at->page_table) { \
-        if (at_count == 0) \
-            nv_vm_unlock_pages(at); \
-        nv_vm_free_pages(at); \
+#define NV_VM_UNLOCK_AND_FREE_PAGES(nv, at_count, at) \
+    if (at->page_table) \
+    { \
+        if (at_count == 0) \
+            nv_vm_unlock_pages(at); \
+        nv_vm_free_pages(nv, at); \
     }
 
 #endif
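The unlock-and-free helper keeps its semantics but is renamed to macro case
and threads the nv_state_t through. Typical call sequence (condensed from
the rm_free_agp_pages path in the nv.c hunks below):

    NV_ATOMIC_DEC(at->usage_count);
    NV_VM_UNLOCK_AND_FREE_PAGES(nv, NV_ATOMIC_READ(at->usage_count), at);
    if (NV_ATOMIC_READ(at->usage_count) == 0)
        nvos_free_alloc(at);    /* also releases the slab-allocated entries */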
diff -ru usr/src/nv/nv.c usr/src/nv.1161283/nv.c
--- usr/src/nv/nv.c	2004-11-03 22:53:00.000000000 +0100
+++ usr/src/nv.1161283/nv.c	2004-11-16 22:57:24.000000000 +0100
@@ -63,6 +63,8 @@
 int nv_swiotlb = 0;
#endif
 
+static kmem_cache_t *nv_pte_t_cache = NULL;
+
 // allow an easy way to convert all debug printfs related to events
 // back and forth between 'info' and 'errors'
#if defined(NV_DBG_EVENTS)
@@ -266,42 +268,41 @@
 )
 {
     nv_alloc_t *at;
-    int pt_size;
+    unsigned int pt_size, i;
 
     NV_KMALLOC(at, sizeof(nv_alloc_t));
     if (at == NULL)
     {
-        nv_printf(NV_DBG_ERRORS, "NVRM: failed to allocate alloc_t\n");
+        nv_printf(NV_DBG_ERRORS, "NVRM: failed to allocate alloc info\n");
         return NULL;
     }
 
     memset(at, 0, sizeof(nv_alloc_t));
 
-    pt_size = num_pages * sizeof(nv_pte_t);
-    NV_KMALLOC(at->page_table, pt_size);
-    if (at->page_table == NULL)
+    pt_size = num_pages * sizeof(nv_pte_t *);
+    if (os_alloc_mem((void **)&at->page_table, pt_size) != RM_OK)
     {
         nv_printf(NV_DBG_ERRORS, "NVRM: failed to allocate page table\n");
         NV_KFREE(at, sizeof(nv_alloc_t));
         return NULL;
     }
+
     memset(at->page_table, 0, pt_size);
     at->num_pages = num_pages;
     NV_ATOMIC_SET(at->usage_count, 0);
 
-#if defined(NV_SG_MAP_BUFFERS)
-    at->dev = dev;
-    pt_size = num_pages * sizeof(struct scatterlist);
-    NV_KMALLOC(at->sg_list, pt_size);
-    if (at->sg_list == NULL)
+    for (i = 0; i < at->num_pages; i++)
     {
-        nv_printf(NV_DBG_ERRORS, "NVRM: failed to allocate scatter gather list\n");
-        NV_KFREE(at->page_table, pt_size);
-        NV_KFREE(at, sizeof(nv_alloc_t));
-        return NULL;
+        NV_KMEM_CACHE_ALLOC(at->page_table[i], nv_pte_t_cache, nv_pte_t);
+        if (at->page_table[i] == NULL)
+        {
+            nv_printf(NV_DBG_ERRORS,
+                "NVRM: failed to allocate page table entry\n");
+            nvos_free_alloc(at);
+            return NULL;
+        }
+        memset(at->page_table[i], 0, sizeof(nv_pte_t));
     }
-    memset(at->sg_list, 0, pt_size);
-#endif
 
     return at;
 }
@@ -311,6 +312,8 @@
     nv_alloc_t *at
 )
 {
+    unsigned int pt_size, i;
+
     if (at == NULL)
         return -1;
 
@@ -320,13 +323,16 @@
     // we keep the page_table around after freeing the pages
     // for bookkeeping reasons. Free the page_table and assume
     // the underlying pages are already unlocked and freed.
-    if (at->page_table)
-        NV_KFREE(at->page_table, at->num_pages * sizeof(nv_pte_t));
-
-#if defined(NV_SG_MAP_BUFFERS)
-    if (at->sg_list)
-        NV_KFREE(at->sg_list, at->num_pages * sizeof(struct scatterlist));
-#endif
+    if (at->page_table != NULL)
+    {
+        for (i = 0; i < at->num_pages; i++)
+        {
+            if (at->page_table[i] != NULL)
+                NV_KMEM_CACHE_FREE(at->page_table[i], nv_pte_t, nv_pte_t_cache);
+        }
+        pt_size = at->num_pages * sizeof(nv_pte_t *);
+        os_free_mem(at->page_table);
+    }
 
     NV_KFREE(at, sizeof(nv_alloc_t));
 
@@ -594,7 +600,7 @@
     int i;
     for (i = 0; i < at->num_pages; i++)
     {
-        unsigned long offset = at->page_table[i].phys_addr;
+        unsigned long offset = at->page_table[i]->phys_addr;
         if ((address >= offset) &&
             (address < (offset + PAGE_SIZE)))
             return at;
@@ -931,6 +937,13 @@
     }
#endif
 
+    NV_KMEM_CACHE_CREATE(nv_pte_t_cache, "nv_pte_t", nv_pte_t);
+    if (nv_pte_t_cache == NULL)
+    {
+        nv_printf(NV_DBG_ERRORS, "NVRM: pte cache allocation failed\n");
+        goto failed;
+    }
+
     // Init the resource manager
     if (!rm_init_rm())
     {
@@ -972,6 +985,14 @@
     return 0;
 
 failed:
+    if (nv_pte_t_cache != NULL)
+        NV_KMEM_CACHE_DESTROY(nv_pte_t_cache);
+
+#if defined(NV_PM_SUPPORT_APM)
+    for (i = 0; i < num_nv_devices; i++)
+        if (apm_nv_dev[i] != NULL) pm_unregister(apm_nv_dev[i]);
+#endif
+
#ifdef CONFIG_DEVFS_FS
     NV_DEVFS_REMOVE_CONTROL();
     for (i = 0; i < num_nv_devices; i++)
@@ -1101,6 +1122,8 @@
     nv_printf(NV_DBG_ERRORS, "NVRM: final mem usage: vm 0x%x km 0x%x fp 0x%x\n",
         vm_usage, km_usage, fp_usage);
#endif
+
+    NV_KMEM_CACHE_DESTROY(nv_pte_t_cache);
 }
 
 module_init(nvidia_init_module);
@@ -1249,15 +1272,15 @@
     index = (address - vma->vm_start)>>PAGE_SHIFT;
 
     // save that index into our page list (make sure it doesn't already exist)
-    if (at->page_table[index].phys_addr)
+    if (at->page_table[index]->phys_addr)
     {
         nv_printf(NV_DBG_ERRORS, "NVRM: page slot already filled in nopage handler!\n");
         os_dbg_breakpoint();
     }
 
-    at->page_table[index].phys_addr = (page_to_pfn(page_ptr) << PAGE_SHIFT);
-    at->page_table[index].dma_addr = (page_to_pfn(page_ptr) << PAGE_SHIFT);
-    at->page_table[index].virt_addr = (unsigned long) __va(page_to_pfn(page_ptr) << PAGE_SHIFT);
+    at->page_table[index]->phys_addr = (page_to_pfn(page_ptr) << PAGE_SHIFT);
+    at->page_table[index]->dma_addr = (page_to_pfn(page_ptr) << PAGE_SHIFT);
+    at->page_table[index]->virt_addr = (unsigned long) __va(page_to_pfn(page_ptr) << PAGE_SHIFT);
 
     return page_ptr;
#endif
@@ -1670,7 +1693,7 @@
     start = vma->vm_start;
     while (pages--)
     {
-        page = (unsigned long) at->page_table[i++].phys_addr;
+        page = (unsigned long) at->page_table[i++]->phys_addr;
         if (NV_REMAP_PAGE_RANGE(start, page, PAGE_SIZE, vma->vm_page_prot))
             return -EAGAIN;
         start += PAGE_SIZE;
@@ -2368,8 +2391,8 @@
 
     for (i = 0; i < at->num_pages; i++)
     {
-        if (address == at->page_table[i].phys_addr)
-            return (void *)(at->page_table[i].virt_addr + offset);
+        if (address == at->page_table[i]->phys_addr)
+            return (void *)(at->page_table[i]->virt_addr + offset);
     }
 }
 
@@ -2400,8 +2423,8 @@
 
     for (i = 0; i < at->num_pages; i++)
     {
-        if (address == at->page_table[i].phys_addr)
-            return (unsigned long)at->page_table[i].dma_addr + offset;
+        if (address == at->page_table[i]->phys_addr)
+            return (unsigned long)at->page_table[i]->dma_addr + offset;
     }
 }
 
@@ -2427,9 +2450,9 @@
     unsigned long address = dma_address & PAGE_MASK;
     for (i = 0; i < at->num_pages; i++)
     {
-        if (address == at->page_table[i].dma_addr)
+        if (address == at->page_table[i]->dma_addr)
         {
-            return at->page_table[i].phys_addr + offset;
+            return at->page_table[i]->phys_addr + offset;
         }
     }
 }
@@ -2466,7 +2489,7 @@
     int i;
     for (i = 0; i < at->num_pages; i++)
     {
-        if (address == (unsigned long) at->page_table[i].dma_addr)
+        if (address == (unsigned long) at->page_table[i]->dma_addr)
         {
             return (void *)((unsigned long) at->key_mapping +
                             (i * PAGE_SIZE));
@@ -2630,7 +2653,7 @@
         nvl_add_alloc(nvl, at);
     } else {
         /* use nvidia's nvagp support */
-        if (nv_vm_malloc_pages(at))
+        if (nv_vm_malloc_pages(nv, at))
             goto failed;
 
         at->class = class;
@@ -2654,7 +2677,7 @@
         if (rm_status)
         {
             nvl_remove_alloc(nvl, at);
-            nv_vm_unlock_and_free_pages(NV_ATOMIC_READ(at->usage_count), at);
+            NV_VM_UNLOCK_AND_FREE_PAGES(nv, NV_ATOMIC_READ(at->usage_count), at);
             goto failed;
         }
         at->priv_data = *priv_data;
@@ -2666,12 +2689,12 @@
     else
     {
 
-        if (nv_vm_malloc_pages(at))
+        if (nv_vm_malloc_pages(nv, at))
             goto failed;
 
         if (kernel)
         {
-            *pAddress = (void *) at->page_table[0].virt_addr;
+            *pAddress = (void *) at->page_table[0]->virt_addr;
         }
         else
        {
@@ -2679,7 +2702,7 @@
             * so use the first page, which is page-aligned. this way, our
             * allocated page table does not need to be page-aligned
             */
-            *pAddress = (void *) at->page_table[0].phys_addr;
+            *pAddress = (void *) at->page_table[0]->phys_addr;
         }
 
         nvl_add_alloc(nvl, at);
@@ -2743,7 +2766,7 @@
             rmStatus = rm_free_agp_pages(nv, pAddress, priv_data);
             if (rmStatus == RM_OK)
             {
-                nv_vm_unlock_and_free_pages(NV_ATOMIC_READ(at->usage_count), at);
+                NV_VM_UNLOCK_AND_FREE_PAGES(nv, NV_ATOMIC_READ(at->usage_count), at);
             }
         }
     } else {
@@ -2759,7 +2782,7 @@
 
         NV_ATOMIC_DEC(at->usage_count);
 
-        nv_vm_unlock_and_free_pages(NV_ATOMIC_READ(at->usage_count), at);
+        NV_VM_UNLOCK_AND_FREE_PAGES(nv, NV_ATOMIC_READ(at->usage_count), at);
     }
 
     if (NV_ATOMIC_READ(at->usage_count) == 0)
@@ -3065,7 +3088,7 @@
     }
 
     /* get the physical address of this page */
-    *paddr = (U032) ((NV_UINTPTR_T)at->page_table[index].dma_addr);
+    *paddr = (U032) ((NV_UINTPTR_T)at->page_table[index]->dma_addr);
 
     return RM_OK;
 }
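Note the module-lifecycle changes in nv.c: the nv_pte_t cache is created once
in nvidia_init_module() before rm_init_rm(), destroyed on both the failure
path and in module cleanup, and the failure path now also unregisters any APM
devices already registered. A condensed view of the resulting init ordering
(sketch only; surrounding probing code and the exact return value elided):

    static int nvidia_init_module(void)
    {
        /* ... device probing ... */
        NV_KMEM_CACHE_CREATE(nv_pte_t_cache, "nv_pte_t", nv_pte_t);
        if (nv_pte_t_cache == NULL)
            goto failed;
        if (!rm_init_rm())        /* resource manager comes up after the cache */
            goto failed;
        return 0;
    failed:
        if (nv_pte_t_cache != NULL)
            NV_KMEM_CACHE_DESTROY(nv_pte_t_cache);
        /* ... APM unregister and devfs teardown as in the hunks above ... */
        return -ENODEV;
    }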