--- /dev/null
+--- X11-driver-firegl-8.10.19/lib/modules/fglrx/build_mod/agpgart_be.c.orig 2005-01-31 19:50:00.000000000 +0200
++++ X11-driver-firegl-8.10.19/lib/modules/fglrx/build_mod/agpgart_be.c 2005-03-02 22:19:25.000000000 +0200
+@@ -261,6 +261,12 @@
+ #define firegl_pci_find_class(class,from) pci_find_class(class,from)
+ #endif
+
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,11)
++#define firegl_pci_find_class(class,from) pci_get_class(class,from)
++#else
++#define firegl_pci_find_class(class,from) pci_find_class(class,from)
++#endif
++
+ int agp_backend_acquire(void)
+ {
+ if (agp_bridge.type == NOT_SUPPORTED) {
+@@ -4075,6 +4081,13 @@
+ }
+ }
+ }
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,11)
++ // the reference count has been increased in agp_backend_initialize.
++ if (device)
++ pci_dev_put(device);
++#endif
++
+ /*
+ * PASS3: Figure out the 8X/4X setting and enable the
+ * target (our motherboard chipset).
+@@ -5283,6 +5296,12 @@
+ pci_dev_put(device);
+ #endif
+
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,11)
++ // the reference count has been increased in agp_backend_initialize.
++ if (device)
++ pci_dev_put(device);
++#endif
++
+ return(0); /* success */
+ }
+
+@@ -7428,6 +7447,11 @@
+ return rc;
+ }
+
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,11)
++ // decrease the reference count.
++ pci_dev_put(agp_bridge.dev);
++#endif
++
+ if (agp_bridge.needs_scratch_page == TRUE) {
+ agp_bridge.scratch_page = agp_bridge.agp_alloc_page();
+
+--- X11-driver-firegl-8.10.19/lib/modules/fglrx/build_mod/firegl_public.c.orig 2005-02-10 04:15:00.000000000 +0200
++++ X11-driver-firegl-8.10.19/lib/modules/fglrx/build_mod/firegl_public.c 2005-03-02 22:21:42.000000000 +0200
+@@ -1664,13 +1664,22 @@
+ {
+ unsigned long pte_linear;
+ pgd_t* pgd_p;
++#if LINUX_VERSION_CODE > 0x02060a /* 0x02060a == KERNEL_VERSION(2,6,10); true for >= 2.6.11 */
++ pud_t* pud_p;
++#endif
+ pmd_t* pmd_p;
+ pte_t* pte_p;
+ pte_t pte;
+
+ pte_linear = VMALLOC_VMADDR(virtual_addr); // convert to pte linear address (x86 => nop)
+ pgd_p = pgd_offset(mm, pte_linear);
++#if LINUX_VERSION_CODE > 0x02060a /* 0x02060a == KERNEL_VERSION(2,6,10); true for >= 2.6.11 */
++ pud_p = pud_offset(pgd_p, pte_linear);
++ pmd_p = pmd_offset(pud_p, pte_linear);
++#else
+ pmd_p = pmd_offset(pgd_p, pte_linear);
++#endif
++
+ #ifndef FGL_ATOMIC_PTE
+ #if LINUX_VERSION_CODE > 0x020500
+ pte_p = pte_offset_kernel(pmd_p, pte_linear);
+@@ -2100,6 +2109,9 @@
+ unsigned long address)
+ {
+ pgd_t* pgd_p;
++#if LINUX_VERSION_CODE > 0x02060a /* 0x02060a == KERNEL_VERSION(2,6,10); true for >= 2.6.11 */
++ pud_t* pud_p;
++#endif
+ pmd_t* pmd_p;
+ pte_t* pte_p;
+ pte_t pte;
+@@ -2200,7 +2212,12 @@
+ /* alternatively we could generate a NOPAGE_OOM "out of memory" */
+ }
+ /* locate medium level page table (x86 => nop) */
++#if LINUX_VERSION_CODE > 0x02060a /* 0x02060a == KERNEL_VERSION(2,6,10); true for >= 2.6.11 */
++ pud_p = pud_offset(pgd_p, pte_linear);
++ pmd_p = pmd_offset(pud_p, pte_linear);
++#else
+ pmd_p = pmd_offset(pgd_p, pte_linear);
++#endif
+ if (!pmd_present(*pmd_p))
+ {
+ __KE_ERROR("FATAL ERROR: User queue buffer not present! (pmd)\n");
+@@ -2564,13 +2581,21 @@
+ {
+ unsigned long pte_linear;
+ pgd_t* pgd_p;
++#if LINUX_VERSION_CODE > 0x02060a /* 0x02060a == KERNEL_VERSION(2,6,10); true for >= 2.6.11 */
++ pud_t* pud_p;
++#endif
+ pmd_t* pmd_p;
+ pte_t* pte_p;
+ pte_t pte;
+
+ pte_linear = VMALLOC_VMADDR(virtual_addr); // convert to pte linear address (x86 => nop)
+ pgd_p = pgd_offset(vma->vm_mm, pte_linear);
++#if LINUX_VERSION_CODE > 0x02060a /* 0x02060a == KERNEL_VERSION(2,6,10); true for >= 2.6.11 */
++ pud_p = pud_offset(pgd_p, pte_linear);
++ pmd_p = pmd_offset(pud_p, pte_linear);
++#else
+ pmd_p = pmd_offset(pgd_p, pte_linear);
++#endif
+ #ifndef FGL_ATOMIC_PTE
+ #if LINUX_VERSION_CODE > 0x020500
+ pte_p = pte_offset_kernel(pmd_p, pte_linear);
+@@ -2719,13 +2744,13 @@
+ #endif /* __ia64__ */
+ vma->vm_flags |= VM_IO; /* not in core dump */
+ }
+- if (remap_page_range(FGL_VMA_API_PASS
++ if (remap_pfn_range(FGL_VMA_API_PASS
+ vma->vm_start,
+- __ke_vm_offset(vma),
++ __ke_vm_offset(vma) >> PAGE_SHIFT,
+ vma->vm_end - vma->vm_start,
+ vma->vm_page_prot))
+ {
+- __KE_DEBUG("remap_page_range failed\n");
++ __KE_DEBUG("remap_pfn_range failed\n");
+ return -EAGAIN;
+ }
+ vma->vm_flags |= VM_SHM | VM_RESERVED; /* Don't swap */
+@@ -2786,13 +2811,13 @@
+ {
+ if (__ke_vm_offset(vma) >= __pa(high_memory))
+ vma->vm_flags |= VM_IO; /* not in core dump */
+- if (remap_page_range(FGL_VMA_API_PASS
++ if (remap_pfn_range(FGL_VMA_API_PASS
+ vma->vm_start,
+- __ke_vm_offset(vma),
++ __ke_vm_offset(vma) >> PAGE_SHIFT,
+ vma->vm_end - vma->vm_start,
+ vma->vm_page_prot))
+ {
+- __KE_DEBUG("remap_page_range failed\n");
++ __KE_DEBUG("remap_pfn_range failed\n");
+ return -EAGAIN;
+ }
+ #ifdef __x86_64__
+@@ -2823,13 +2848,13 @@
+ {
+ if (__ke_vm_offset(vma) >= __pa(high_memory))
+ vma->vm_flags |= VM_IO; /* not in core dump */
+- if (remap_page_range(FGL_VMA_API_PASS
++ if (remap_pfn_range(FGL_VMA_API_PASS
+ vma->vm_start,
+- __ke_vm_offset(vma),
++ __ke_vm_offset(vma) >> PAGE_SHIFT,
+ vma->vm_end - vma->vm_start,
+ vma->vm_page_prot))
+ {
+- __KE_DEBUG("remap_page_range failed\n");
++ __KE_DEBUG("remap_pfn_range failed\n");
+ return -EAGAIN;
+ }
+ #ifdef __x86_64__
+@@ -2873,6 +2898,37 @@
+
+ #if LINUX_VERSION_CODE >= 0x020400
+
++#if LINUX_VERSION_CODE >= 0x02060b
++
++typedef struct {
++ void (*free_memory)(struct agp_memory *);
++ struct agp_memory * (*allocate_memory)(size_t, u32);
++ int (*bind_memory)(struct agp_memory *, off_t);
++ int (*unbind_memory)(struct agp_memory *);
++ void (*enable)(u32);
++ int (*acquire)(void);
++ void (*release)(void);
++ int (*copy_info)(struct agp_kern_info *);
++} drm_agp_t;
++
++static const drm_agp_t drm_agp = {
++ &agp_free_memory,
++ &agp_allocate_memory,
++ &agp_bind_memory,
++ &agp_unbind_memory,
++ &agp_enable,
++ &agp_backend_acquire,
++ &agp_backend_release,
++ &agp_copy_info
++};
++#undef DRM_AGP_MODULE_GET
++#undef DRM_AGP_MODULE_PUT
++
++#define DRM_AGP_MODULE_GET &drm_agp
++#define DRM_AGP_MODULE_PUT
++
++#endif
++
+ static const drm_agp_t *drm_agp_module_stub = NULL;
+
+ #define AGP_FUNCTIONS 8