--- linux/include/linux/agp_backend.h.newagpdist Fri Feb 11 14:50:45 2000 +++ linux/include/linux/agp_backend.h Fri Feb 11 14:50:45 2000 @@ -0,0 +1,224 @@ +/* + * AGPGART module version 0.99 + * Copyright (C) 1999 Jeff Hartmann + * Copyright (C) 1999 Precision Insight + * Copyright (C) 1999 Xi Graphics + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * JEFF HARTMANN, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE + * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#ifndef _AGP_BACKEND_H +#define _AGP_BACKEND_H 1 + +#ifndef TRUE +#define TRUE 1 +#endif + +#ifndef FALSE +#define FALSE 0 +#endif + +#define AGPGART_VERSION_MAJOR 0 +#define AGPGART_VERSION_MINOR 99 + +enum chipset_type { + NOT_SUPPORTED, + INTEL_GENERIC, + INTEL_LX, + INTEL_BX, + INTEL_GX, + INTEL_I810, + VIA_GENERIC, + VIA_VP3, + VIA_MVP3, + VIA_APOLLO_PRO, + SIS_GENERIC, + AMD_GENERIC, + AMD_IRONGATE, + ALI_M1541, + ALI_GENERIC +}; + +typedef struct _agp_version { + u16 major; + u16 minor; +} agp_version; + +typedef struct _agp_kern_info { + agp_version version; + struct pci_dev *device; + enum chipset_type chipset; + unsigned long mode; + off_t aper_base; + size_t aper_size; + int max_memory; /* In pages */ + int current_memory; +} agp_kern_info; + +/* + * The agp_memory structure has information + * about the block of agp memory allocated. + * A caller may manipulate the next and prev + * pointers to link each allocated item into + * a list. These pointers are ignored by the + * backend. Everything else should never be + * written to, but the caller may read any of + * the items to detrimine the status of this + * block of agp memory. + * + */ + +typedef struct _agp_memory { + int key; + struct _agp_memory *next; + struct _agp_memory *prev; + size_t page_count; + int num_scratch_pages; + unsigned long *memory; + off_t pg_start; + u32 type; + u8 is_bound; + u8 is_flushed; +} agp_memory; + +#define AGP_NORMAL_MEMORY 0 + +extern void agp_free_memory(agp_memory *); + +/* + * void agp_free_memory(agp_memory *curr) : + * + * This function frees memory associated with + * an agp_memory pointer. It is the only function + * that can be called when the backend is not owned + * by the caller. (So it can free memory on client + * death.) + * + * It takes an agp_memory pointer as an argument. 
+ * + */ + +extern agp_memory *agp_allocate_memory(size_t, u32); + +/* + * agp_memory *agp_allocate_memory(size_t page_count, u32 type) : + * + * This function allocates a group of pages of + * a certain type. + * + * It takes a size_t argument of the number of pages, and + * an u32 argument of the type of memory to be allocated. + * Every agp bridge device will allow you to allocate + * AGP_NORMAL_MEMORY which maps to physical ram. Any other + * type is device dependant. + * + * It returns NULL whenever memory is unavailable. + * + */ + +extern void agp_copy_info(agp_kern_info *); + +/* + * void agp_copy_info(agp_kern_info *info) : + * + * This function copies information about the + * agp bridge device and the state of the agp + * backend into an agp_kern_info pointer. + * + * It takes an agp_kern_info pointer as an + * argument. The caller should insure that + * this pointer is valid. + * + */ + +extern int agp_bind_memory(agp_memory *, off_t); + +/* + * int agp_bind_memory(agp_memory *curr, off_t pg_start) : + * + * This function binds an agp_memory structure + * into the graphics aperture translation table. + * + * It takes an agp_memory pointer and an offset into + * the graphics aperture translation table as arguments + * + * It returns -EINVAL if the pointer == NULL. + * It returns -EBUSY if the area of the table + * requested is already in use. + * + */ + +extern int agp_unbind_memory(agp_memory *); + +/* + * int agp_unbind_memory(agp_memory *curr) : + * + * This function removes an agp_memory structure + * from the graphics aperture translation table. + * + * It takes an agp_memory pointer as an argument. + * + * It returns -EINVAL if this piece of agp_memory + * is not currently bound to the graphics aperture + * translation table or if the agp_memory + * pointer == NULL + * + */ + +extern void agp_enable(u32); + +/* + * void agp_enable(u32 mode) : + * + * This function initializes the agp point-to-point + * connection. 
+ * + * It takes an agp mode register as an argument + * + */ + +extern int agp_backend_acquire(void); + +/* + * int agp_backend_acquire(void) : + * + * This Function attempts to acquire the agp + * backend. + * + * returns -EBUSY if agp is in use, + * returns 0 if the caller owns the agp backend + */ + +extern void agp_backend_release(void); + +/* + * void agp_backend_release(void) : + * + * This Function releases the lock on the agp + * backend. + * + * The caller must insure that the graphics + * aperture translation table is read for use + * by another entity. (Ensure that all memory + * it bound is unbound.) + * + */ + +#endif /* _AGP_BACKEND_H */ --- linux/include/linux/agpgart.h.newagpdist Fri Feb 11 14:50:45 2000 +++ linux/include/linux/agpgart.h Fri Feb 11 14:50:45 2000 @@ -0,0 +1,216 @@ +/* + * AGPGART module version 0.99 + * Copyright (C) 1999 Jeff Hartmann + * Copyright (C) 1999 Precision Insight + * Copyright (C) 1999 Xi Graphics + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * JEFF HARTMANN, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE + * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef _AGP_H +#define _AGP_H 1 + +#define AGPIOC_BASE 'A' +#define AGPIOC_INFO _IOR (AGPIOC_BASE, 0, agp_info*) +#define AGPIOC_ACQUIRE _IO (AGPIOC_BASE, 1) +#define AGPIOC_RELEASE _IO (AGPIOC_BASE, 2) +#define AGPIOC_SETUP _IOW (AGPIOC_BASE, 3, agp_setup*) +#define AGPIOC_RESERVE _IOW (AGPIOC_BASE, 4, agp_region*) +#define AGPIOC_PROTECT _IOW (AGPIOC_BASE, 5, agp_region*) +#define AGPIOC_ALLOCATE _IOWR(AGPIOC_BASE, 6, agp_allocate*) +#define AGPIOC_DEALLOCATE _IOW (AGPIOC_BASE, 7, int) +#define AGPIOC_BIND _IOW (AGPIOC_BASE, 8, agp_bind*) +#define AGPIOC_UNBIND _IOW (AGPIOC_BASE, 9, agp_unbind*) + +#define AGP_DEVICE "/dev/agpgart" + +#ifndef TRUE +#define TRUE 1 +#endif + +#ifndef FALSE +#define FALSE 0 +#endif + +#ifndef __KERNEL__ +#include +#include + +typedef struct _agp_version { + __u16 major; + __u16 minor; +} agp_version; + +typedef struct _agp_info { + agp_version version; /* version of the driver */ + __u32 bridge_id; /* bridge vendor/device */ + __u32 agp_mode; /* mode info of bridge */ + off_t aper_base; /* base of aperture */ + size_t aper_size; /* size of aperture */ + size_t pg_total; /* max pages (swap + system) */ + size_t pg_system; /* max pages (system) */ + size_t pg_used; /* current pages used */ +} agp_info; + +typedef struct _agp_setup { + __u32 agp_mode; /* mode info of bridge */ +} agp_setup; + +/* + * The "prot" down below needs still a "sleep" flag somehow ... 
+ */ +typedef struct _agp_segment { + off_t pg_start; /* starting page to populate */ + size_t pg_count; /* number of pages */ + int prot; /* prot flags for mmap */ +} agp_segment; + +typedef struct _agp_region { + pid_t pid; /* pid of process */ + size_t seg_count; /* number of segments */ + struct _agp_segment *seg_list; +} agp_region; + +typedef struct _agp_allocate { + int key; /* tag of allocation */ + size_t pg_count; /* number of pages */ + __u32 type; /* 0 == normal, other devspec */ +} agp_allocate; + +typedef struct _agp_bind { + int key; /* tag of allocation */ + off_t pg_start; /* starting page to populate */ +} agp_bind; + +typedef struct _agp_unbind { + int key; /* tag of allocation */ + __u32 priority; /* priority for paging out */ +} agp_unbind; + +#else /* __KERNEL__ */ + +#define AGPGART_MINOR 175 + +#define AGP_UNLOCK() up(&(agp_fe.agp_mutex)); +#define AGP_LOCK() down(&(agp_fe.agp_mutex)); +#define AGP_LOCK_INIT() sema_init(&(agp_fe.agp_mutex), 1) + +#ifndef _AGP_BACKEND_H +typedef struct _agp_version { + u16 major; + u16 minor; +} agp_version; + +#endif + +typedef struct _agp_info { + agp_version version; /* version of the driver */ + u32 bridge_id; /* bridge vendor/device */ + u32 agp_mode; /* mode info of bridge */ + off_t aper_base; /* base of aperture */ + size_t aper_size; /* size of aperture */ + size_t pg_total; /* max pages (swap + system) */ + size_t pg_system; /* max pages (system) */ + size_t pg_used; /* current pages used */ +} agp_info; + +typedef struct _agp_setup { + u32 agp_mode; /* mode info of bridge */ +} agp_setup; + +/* + * The "prot" down below needs still a "sleep" flag somehow ... 
+ */ +typedef struct _agp_segment { + off_t pg_start; /* starting page to populate */ + size_t pg_count; /* number of pages */ + int prot; /* prot flags for mmap */ +} agp_segment; + +typedef struct _agp_segment_priv { + off_t pg_start; + size_t pg_count; + pgprot_t prot; +} agp_segment_priv; + +typedef struct _agp_region { + pid_t pid; /* pid of process */ + size_t seg_count; /* number of segments */ + struct _agp_segment *seg_list; +} agp_region; + +typedef struct _agp_allocate { + int key; /* tag of allocation */ + size_t pg_count; /* number of pages */ + u32 type; /* 0 == normal, other devspec */ +} agp_allocate; + +typedef struct _agp_bind { + int key; /* tag of allocation */ + off_t pg_start; /* starting page to populate */ +} agp_bind; + +typedef struct _agp_unbind { + int key; /* tag of allocation */ + u32 priority; /* priority for paging out */ +} agp_unbind; + +typedef struct _agp_client { + struct _agp_client *next; + struct _agp_client *prev; + pid_t pid; + int num_segments; + agp_segment_priv **segments; +} agp_client; + +typedef struct _agp_controller { + struct _agp_controller *next; + struct _agp_controller *prev; + pid_t pid; + int num_clients; + agp_memory *pool; + agp_client *clients; +} agp_controller; + +#define AGP_FF_ALLOW_CLIENT 0 +#define AGP_FF_ALLOW_CONTROLLER 1 +#define AGP_FF_IS_CLIENT 2 +#define AGP_FF_IS_CONTROLLER 3 +#define AGP_FF_IS_VALID 4 + +typedef struct _agp_file_private { + struct _agp_file_private *next; + struct _agp_file_private *prev; + pid_t my_pid; + u32 access_flags; +} agp_file_private; + +struct agp_front_data { + struct semaphore agp_mutex; + agp_controller *current_controller; + agp_controller *controllers; + agp_file_private *file_priv_list; + u8 used_by_controller; + u8 backend_acquired; +}; + +#endif /* __KERNEL__ */ + +#endif /* _AGP_H */ --- linux/drivers/char/agp/Makefile.newagpdist Fri Feb 11 14:50:45 2000 +++ linux/drivers/char/agp/Makefile Fri Feb 11 14:50:45 2000 @@ -0,0 +1,32 @@ +# +# Makefile for the 
agpgart device driver. This driver adds a user +# space ioctl interface to use agp memory. It also adds a kernel interface +# that other drivers could use to manipulate agp memory. + +M_OBJS := agpgart.o + +CFLAGS_agp_backend.o := + +ifdef CONFIG_AGP_I810 +CFLAGS_agp_backend.o += -DAGP_BUILD_INTEL_I810 +endif +ifdef CONFIG_AGP_INTEL +CFLAGS_agp_backend.o += -DAGP_BUILD_INTEL_GENERIC +endif +ifdef CONFIG_AGP_VIA +CFLAGS_agp_backend.o += -DAGP_BUILD_VIA_GENERIC +endif +ifdef CONFIG_AGP_AMD +CFLAGS_agp_backend.o += -DAGP_BUILD_AMD_IRONGATE +endif +ifdef CONFIG_AGP_SIS +CFLAGS_agp_backend.o += -DAGP_BUILD_SIS_GENERIC +endif +ifdef CONFIG_AGP_ALI +CFLAGS_agp_backend.o += -DAGP_BUILD_ALI_M1541 +endif + +include $(TOPDIR)/Rules.make + +agpgart.o: agp_backend.o agpgart_fe.o + $(LD) $(LD_RFLAG) -r -o $@ agp_backend.o agpgart_fe.o --- linux/drivers/char/agp/agp_backend.c.newagpdist Fri Feb 11 14:50:45 2000 +++ linux/drivers/char/agp/agp_backend.c Fri Feb 11 14:50:45 2000 @@ -0,0 +1,1983 @@ +/* + * AGPGART module version 0.99 + * Copyright (C) 1999 Jeff Hartmann + * Copyright (C) 1999 Precision Insight + * Copyright (C) 1999 Xi Graphics + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * JEFF HARTMANN, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE + * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + */ +#define EXPORT_SYMTAB +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include "agp_backendP.h" + +static struct agp_bridge_data agp_bridge; + +#define CACHE_FLUSH agp_bridge.cache_flush + +MODULE_AUTHOR("Jeff Hartmann "); +MODULE_PARM(agp_try_unsupported, "1i"); +EXPORT_SYMBOL(agp_free_memory); +EXPORT_SYMBOL(agp_allocate_memory); +EXPORT_SYMBOL(agp_copy_info); +EXPORT_SYMBOL(agp_bind_memory); +EXPORT_SYMBOL(agp_unbind_memory); +EXPORT_SYMBOL(agp_enable); +EXPORT_SYMBOL(agp_backend_acquire); +EXPORT_SYMBOL(agp_backend_release); + +static int agp_try_unsupported __initdata = 0; + +#ifdef __SMP__ +static atomic_t cpus_waiting; +#endif + +int agp_backend_acquire(void) +{ + atomic_inc(&(agp_bridge.agp_in_use)); + + if (atomic_read(&(agp_bridge.agp_in_use)) != 1) { + atomic_dec(&(agp_bridge.agp_in_use)); + return -EBUSY; + } + MOD_INC_USE_COUNT; + return 0; +} + +void agp_backend_release(void) +{ + atomic_dec(&(agp_bridge.agp_in_use)); + MOD_DEC_USE_COUNT; +} + +static void flush_cache(void) +{ + asm volatile ("wbinvd":::"memory"); +} + +#ifdef __SMP__ +static void ipi_handler(void *null) +{ + flush_cache(); + atomic_dec(&cpus_waiting); + while (atomic_read(&cpus_waiting) > 0) + barrier(); +} + +static void smp_flush_cache(void) +{ + atomic_set(&cpus_waiting, smp_num_cpus - 1); + if (smp_call_function(ipi_handler, NULL, 1, 0) != 0) + panic("agpgart: timed out waiting for the other CPUs!\n"); + flush_cache(); + while (atomic_read(&cpus_waiting) > 0) + barrier(); +} +#endif + +/* + * Basic Page Allocation Routines - + * 
These routines handle page allocation + * and by default they reserve the allocated + * memory. They also handle incrementing the + * current_memory_agp value, Which is checked + * against a maximum value. + */ + +static void *agp_alloc_page(void) +{ + void *pt; + + pt = (void *) __get_free_page(GFP_KERNEL); + if (pt == NULL) { + return NULL; + } + atomic_inc(&(mem_map[MAP_NR(pt)].count)); + set_bit(PG_locked, &mem_map[MAP_NR(pt)].flags); + atomic_inc(&(agp_bridge.current_memory_agp)); + return pt; +} + +static void agp_destroy_page(void *pt) +{ + if (pt == NULL) + return; + + atomic_dec(&(mem_map[MAP_NR(pt)].count)); + clear_bit(PG_locked, &mem_map[MAP_NR(pt)].flags); + free_page((unsigned long) pt); + atomic_dec(&(agp_bridge.current_memory_agp)); +} + +/* End Basic Page Allocation Routines */ + +/* + * Generic routines for handling agp_memory structures - + * They use the basic page allocation routines to do the + * brunt of the work. + */ + +#define MAXKEY (4096 * 32) + +static void agp_free_key(int key) +{ + + if (key < 0) { + return; + } + if (key < MAXKEY) { + clear_bit(key, agp_bridge.key_list); + } +} + +static int agp_get_key(void) +{ + int bit; + + bit = find_first_zero_bit(agp_bridge.key_list, MAXKEY); + if (bit < MAXKEY) { + set_bit(bit, agp_bridge.key_list); + return bit; + } + return -1; +} + +static agp_memory *agp_create_memory(int scratch_pages) +{ + agp_memory *new; + + new = kmalloc(sizeof(agp_memory), GFP_KERNEL); + + if (new == NULL) { + return NULL; + } + memset(new, 0, sizeof(agp_memory)); + new->key = agp_get_key(); + + if (new->key < 0) { + kfree(new); + return NULL; + } + new->memory = vmalloc(PAGE_SIZE * scratch_pages); + + if (new->memory == NULL) { + agp_free_key(new->key); + kfree(new); + return NULL; + } + new->num_scratch_pages = scratch_pages; + return new; +} + +void agp_free_memory(agp_memory * curr) +{ + int i; + + if (curr == NULL) { + return; + } + if (curr->is_bound == TRUE) { + agp_unbind_memory(curr); + } + if (curr->type != 
0) { + agp_bridge.free_by_type(curr); + MOD_DEC_USE_COUNT; + return; + } + if (curr->page_count != 0) { + for (i = 0; i < curr->page_count; i++) { + curr->memory[i] &= ~(0x00000fff); + agp_destroy_page((void *) phys_to_virt(curr->memory[i])); + } + } + agp_free_key(curr->key); + vfree(curr->memory); + kfree(curr); + MOD_DEC_USE_COUNT; +} + +#define ENTRIES_PER_PAGE (PAGE_SIZE / sizeof(unsigned long)) + +agp_memory *agp_allocate_memory(size_t page_count, u32 type) +{ + int scratch_pages; + agp_memory *new; + int i; + + if ((atomic_read(&(agp_bridge.current_memory_agp)) + page_count) > + agp_bridge.max_memory_agp) { + return NULL; + } + if (type != 0) { + new = agp_bridge.alloc_by_type(page_count, type); + return new; + } + scratch_pages = (page_count + ENTRIES_PER_PAGE - 1) / ENTRIES_PER_PAGE; + + new = agp_create_memory(scratch_pages); + + if (new == NULL) { + return NULL; + } + for (i = 0; i < page_count; i++) { + new->memory[i] = (unsigned long) agp_alloc_page(); + + if ((void *) new->memory[i] == NULL) { + /* Free this structure */ + agp_free_memory(new); + return NULL; + } + new->memory[i] = + agp_bridge.mask_memory(virt_to_phys((void *) new->memory[i]), type); + new->page_count++; + } + + MOD_INC_USE_COUNT; + return new; +} + +/* End - Generic routines for handling agp_memory structures */ + +static int agp_return_size(void) +{ + int current_size; + void *temp; + + temp = agp_bridge.current_size; + + switch (agp_bridge.size_type) { + case U8_APER_SIZE: + current_size = ((aper_size_info_8 *) temp)->size; + break; + case U16_APER_SIZE: + current_size = ((aper_size_info_16 *) temp)->size; + break; + case U32_APER_SIZE: + current_size = ((aper_size_info_32 *) temp)->size; + break; + case FIXED_APER_SIZE: + current_size = ((aper_size_info_fixed *) temp)->size; + break; + default: + current_size = 0; + break; + } + + return current_size; +} + +/* Routine to copy over information structure */ + +void agp_copy_info(agp_kern_info * info) +{ + memset(info, 0, 
sizeof(agp_kern_info)); + info->version.major = agp_bridge.version->major; + info->version.minor = agp_bridge.version->minor; + info->device = agp_bridge.dev; + info->chipset = agp_bridge.type; + info->mode = agp_bridge.mode; + info->aper_base = agp_bridge.gart_bus_addr; + info->aper_size = agp_return_size(); + info->max_memory = agp_bridge.max_memory_agp; + info->current_memory = atomic_read(&agp_bridge.current_memory_agp); +} + +/* End - Routine to copy over information structure */ + +/* + * Routines for handling swapping of agp_memory into the GATT - + * These routines take agp_memory and insert them into the GATT. + * They call device specific routines to actually write to the GATT. + */ + +int agp_bind_memory(agp_memory * curr, off_t pg_start) +{ + int ret_val; + + if ((curr == NULL) || (curr->is_bound == TRUE)) { + return -EINVAL; + } + if (curr->is_flushed == FALSE) { + CACHE_FLUSH(); + curr->is_flushed = TRUE; + } + ret_val = agp_bridge.insert_memory(curr, pg_start, curr->type); + + if (ret_val != 0) { + return ret_val; + } + curr->is_bound = TRUE; + curr->pg_start = pg_start; + return 0; +} + +int agp_unbind_memory(agp_memory * curr) +{ + int ret_val; + + if (curr == NULL) { + return -EINVAL; + } + if (curr->is_bound != TRUE) { + return -EINVAL; + } + ret_val = agp_bridge.remove_memory(curr, curr->pg_start, curr->type); + + if (ret_val != 0) { + return ret_val; + } + curr->is_bound = FALSE; + curr->pg_start = 0; + return 0; +} + +/* End - Routines for handling swapping of agp_memory into the GATT */ + +/* + * Driver routines - start + * Currently this module supports the + * i810, 440lx, 440bx, 440gx, via vp3, via mvp3, + * amd irongate, ALi M1541 and generic support for the + * SiS chipsets. 
+ */ + +/* Generic Agp routines - Start */ + +static void agp_generic_agp_enable(u32 mode) +{ + struct pci_dev *device = NULL; + u32 command, scratch, cap_id; + u8 cap_ptr; + + pci_read_config_dword(agp_bridge.dev, + agp_bridge.capndx + 4, + &command); + + /* + * PASS1: go throu all devices that claim to be + * AGP devices and collect their data. + */ + + while ((device = pci_find_class(PCI_CLASS_DISPLAY_VGA << 8, device)) != NULL) { + pci_read_config_dword(device, 0x04, &scratch); + + if (!(scratch & 0x00100000)) + continue; + + pci_read_config_byte(device, 0x34, &cap_ptr); + + if (cap_ptr != 0x00) { + do { + pci_read_config_dword(device, cap_ptr, &cap_id); + + if ((cap_id & 0xff) != 0x02) + cap_ptr = (cap_id >> 8) & 0xff; + } + while (((cap_id & 0xff) != 0x02) && (cap_ptr != 0x00)); + } + if (cap_ptr != 0x00) { + /* + * Ok, here we have a AGP device. Disable impossible settings, + * and adjust the readqueue to the minimum. + */ + + pci_read_config_dword(device, cap_ptr + 4, &scratch); + + /* adjust RQ depth */ + command = + ((command & ~0xff000000) | + min((mode & 0xff000000), min((command & 0xff000000), (scratch & 0xff000000)))); + + /* disable SBA if it's not supported */ + if (!((command & 0x00000200) && (scratch & 0x00000200) && (mode & 0x00000200))) + command &= ~0x00000200; + + /* disable FW if it's not supported */ + if (!((command & 0x00000010) && (scratch & 0x00000010) && (mode & 0x00000010))) + command &= ~0x00000010; + + if (!((command & 4) && (scratch & 4) && (mode & 4))) + command &= ~0x00000004; + + if (!((command & 2) && (scratch & 2) && (mode & 2))) + command &= ~0x00000002; + + if (!((command & 1) && (scratch & 1) && (mode & 1))) + command &= ~0x00000001; + } + } + /* + * PASS2: Figure out the 4X/2X/1X setting and enable the + * target (our motherboard chipset). 
+ */ + + if (command & 4) { + command &= ~3; /* 4X */ + } + if (command & 2) { + command &= ~5; /* 2X */ + } + if (command & 1) { + command &= ~6; /* 1X */ + } + command |= 0x00000100; + + pci_write_config_dword(agp_bridge.dev, + agp_bridge.capndx + 8, + command); + + /* + * PASS3: Go throu all AGP devices and update the + * command registers. + */ + + while ((device = pci_find_class(PCI_CLASS_DISPLAY_VGA << 8, device)) != NULL) { + pci_read_config_dword(device, 0x04, &scratch); + + if (!(scratch & 0x00100000)) + continue; + + pci_read_config_byte(device, 0x34, &cap_ptr); + + if (cap_ptr != 0x00) { + do { + pci_read_config_dword(device, cap_ptr, &cap_id); + + if ((cap_id & 0xff) != 0x02) + cap_ptr = (cap_id >> 8) & 0xff; + } + while (((cap_id & 0xff) != 0x02) && (cap_ptr != 0x00)); + } + if (cap_ptr != 0x00) + pci_write_config_dword(device, cap_ptr + 8, command); + } +} + +static int agp_generic_create_gatt_table(void) +{ + char *table; + char *table_end; + int size; + int page_order; + int num_entries; + int i; + void *temp; + + table = NULL; + i = agp_bridge.aperture_size_idx; + temp = agp_bridge.current_size; + size = page_order = num_entries = 0; + + if (agp_bridge.size_type != FIXED_APER_SIZE) { + do { + switch (agp_bridge.size_type) { + case U8_APER_SIZE: + size = ((aper_size_info_8 *) temp)->size; + page_order = ((aper_size_info_8 *) temp)->page_order; + num_entries = ((aper_size_info_8 *) temp)->num_entries; + break; + case U16_APER_SIZE: + size = ((aper_size_info_16 *) temp)->size; + page_order = ((aper_size_info_16 *) temp)->page_order; + num_entries = ((aper_size_info_16 *) temp)->num_entries; + break; + case U32_APER_SIZE: + size = ((aper_size_info_32 *) temp)->size; + page_order = ((aper_size_info_32 *) temp)->page_order; + num_entries = ((aper_size_info_32 *) temp)->num_entries; + break; + /* This case will never really happen */ + case FIXED_APER_SIZE: + default: + size = page_order = num_entries = 0; + break; + } + + table = (char *) 
__get_free_pages(GFP_KERNEL, page_order); + + if (table == NULL) { + i++; + + switch (agp_bridge.size_type) { + case U8_APER_SIZE: + agp_bridge.current_size = (((aper_size_info_8 *) agp_bridge.aperture_sizes) + i); + break; + case U16_APER_SIZE: + agp_bridge.current_size = (((aper_size_info_16 *) agp_bridge.aperture_sizes) + i); + break; + case U32_APER_SIZE: + agp_bridge.current_size = (((aper_size_info_32 *) agp_bridge.aperture_sizes) + i); + break; + /* This case will never really happen */ + case FIXED_APER_SIZE: + default: + size = page_order = num_entries = 0; + break; + } + } else { + agp_bridge.aperture_size_idx = i; + } + } while ((table == NULL) && (i < agp_bridge.num_aperture_sizes)); + } else { + size = ((aper_size_info_fixed *) temp)->size; + page_order = ((aper_size_info_fixed *) temp)->page_order; + num_entries = ((aper_size_info_fixed *) temp)->num_entries; + table = (char *) __get_free_pages(GFP_KERNEL, page_order); + } + + if (table == NULL) { + return -ENOMEM; + } + table_end = table + ((PAGE_SIZE * (1 << page_order)) - 1); + + for (i = MAP_NR(table); i < MAP_NR(table_end); i++) { + set_bit(PG_reserved, &mem_map[i].flags); + } + + agp_bridge.gatt_table_real = (unsigned long *) table; + CACHE_FLUSH(); + agp_bridge.gatt_table = ioremap_nocache(virt_to_phys(table), + (PAGE_SIZE * (1 << page_order))); + CACHE_FLUSH(); + + if (agp_bridge.gatt_table == NULL) { + for (i = MAP_NR(table); i < MAP_NR(table_end); i++) { + clear_bit(PG_reserved, &mem_map[i].flags); + } + + free_pages((unsigned long) table, page_order); + + return -ENOMEM; + } + agp_bridge.gatt_bus_addr = virt_to_phys(agp_bridge.gatt_table_real); + + for (i = 0; i < num_entries; i++) { + agp_bridge.gatt_table[i] = (unsigned long) agp_bridge.scratch_page; + } + + return 0; +} + +static int agp_generic_free_gatt_table(void) +{ + int i; + int page_order; + char *table, *table_end; + void *temp; + + temp = agp_bridge.current_size; + + switch (agp_bridge.size_type) { + case U8_APER_SIZE: + 
page_order = ((aper_size_info_8 *) temp)->page_order; + break; + case U16_APER_SIZE: + page_order = ((aper_size_info_16 *) temp)->page_order; + break; + case U32_APER_SIZE: + page_order = ((aper_size_info_32 *) temp)->page_order; + break; + case FIXED_APER_SIZE: + page_order = ((aper_size_info_fixed *) temp)->page_order; + break; + default: + page_order = 0; + break; + } + + /* Do not worry about freeing memory, because if this is + * called, then all agp memory is deallocated and removed + * from the table. + */ + + iounmap(agp_bridge.gatt_table); + table = (char *) agp_bridge.gatt_table_real; + table_end = table + ((PAGE_SIZE * (1 << page_order)) - 1); + + for (i = MAP_NR(table); i < MAP_NR(table_end); i++) { + clear_bit(PG_reserved, &mem_map[i].flags); + } + + free_pages((unsigned long) agp_bridge.gatt_table_real, page_order); + return 0; +} + +static int agp_generic_insert_memory(agp_memory * mem, + off_t pg_start, int type) +{ + int i, j, num_entries; + void *temp; + + temp = agp_bridge.current_size; + + switch (agp_bridge.size_type) { + case U8_APER_SIZE: + num_entries = ((aper_size_info_8 *) temp)->num_entries; + break; + case U16_APER_SIZE: + num_entries = ((aper_size_info_16 *) temp)->num_entries; + break; + case U32_APER_SIZE: + num_entries = ((aper_size_info_32 *) temp)->num_entries; + break; + case FIXED_APER_SIZE: + num_entries = ((aper_size_info_fixed *) temp)->num_entries; + break; + default: + num_entries = 0; + break; + } + + if (type != 0 || mem->type != 0) { + /* The generic routines know nothing of memory types */ + return -EINVAL; + } + if ((pg_start + mem->page_count) > num_entries) { + return -EINVAL; + } + j = pg_start; + + while (j < (pg_start + mem->page_count)) { + if (!PGE_EMPTY(agp_bridge.gatt_table[j])) { + return -EBUSY; + } + j++; + } + + if (mem->is_flushed == FALSE) { + CACHE_FLUSH(); + mem->is_flushed = TRUE; + } + for (i = 0, j = pg_start; i < mem->page_count; i++, j++) { + agp_bridge.gatt_table[j] = mem->memory[i]; + } + + 
agp_bridge.tlb_flush(mem); + return 0; +} + +static int agp_generic_remove_memory(agp_memory * mem, off_t pg_start, + int type) +{ + int i; + + if (type != 0 || mem->type != 0) { + /* The generic routines know nothing of memory types */ + return -EINVAL; + } + for (i = pg_start; i < (mem->page_count + pg_start); i++) { + agp_bridge.gatt_table[i] = (unsigned long) agp_bridge.scratch_page; + } + + agp_bridge.tlb_flush(mem); + return 0; +} + +static agp_memory *agp_generic_alloc_by_type(size_t page_count, int type) +{ + return NULL; +} + +static void agp_generic_free_by_type(agp_memory * curr) +{ + if (curr->memory != NULL) { + vfree(curr->memory); + } + agp_free_key(curr->key); + kfree(curr); +} + +void agp_enable(u32 mode) +{ + agp_bridge.agp_enable(mode); +} + +/* End - Generic Agp routines */ + +#ifdef AGP_BUILD_INTEL_I810 + +static aper_size_info_fixed intel_i810_sizes[] = +{ + {64, 16384, 4}, + /* The 32M mode still requires a 64k gatt */ + {32, 8192, 4} +}; + +#define AGP_DCACHE_MEMORY 1 + +static gatt_mask intel_i810_masks[] = +{ + {I810_PTE_VALID, 0}, + {(I810_PTE_VALID | I810_PTE_LOCAL), AGP_DCACHE_MEMORY} +}; + +static struct _intel_i810_private { + struct pci_dev *i810_dev; /* device one */ + volatile unsigned char *registers; + int num_dcache_entries; +} intel_i810_private; + +static int intel_i810_fetch_size(void) +{ + u32 smram_miscc; + aper_size_info_fixed *values; + + pci_read_config_dword(agp_bridge.dev, I810_SMRAM_MISCC, &smram_miscc); + values = (aper_size_info_fixed *) agp_bridge.aperture_sizes; + + if ((smram_miscc & I810_GMS) == I810_GMS_DISABLE) { + printk("agpgart: i810 is disabled\n"); + return 0; + } + if ((smram_miscc & I810_GFX_MEM_WIN_SIZE) == I810_GFX_MEM_WIN_32M) { + agp_bridge.previous_size = + agp_bridge.current_size = (void *) (values + 1); + agp_bridge.aperture_size_idx = 1; + return values[1].size; + } else { + agp_bridge.previous_size = + agp_bridge.current_size = (void *) (values); + agp_bridge.aperture_size_idx = 0; + return 
values[0].size; + } + + return 0; +} + +static int intel_i810_configure(void) +{ + aper_size_info_fixed *current_size; + u32 temp; + int i; + + current_size = (aper_size_info_fixed *) agp_bridge.current_size; + + pci_read_config_dword(intel_i810_private.i810_dev, I810_MMADDR, &temp); + temp &= 0xfff80000; + + intel_i810_private.registers = + (volatile unsigned char *) ioremap(temp, 128 * 4096); + + if ((INREG32(intel_i810_private.registers, I810_DRAM_CTL) + & I810_DRAM_ROW_0) == I810_DRAM_ROW_0_SDRAM) { + /* This will need to be dynamically assigned */ + printk("agpgart: detected 4MB dedicated video ram.\n"); + intel_i810_private.num_dcache_entries = 1024; + } + pci_read_config_dword(intel_i810_private.i810_dev, I810_GMADDR, &temp); + agp_bridge.gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK); + OUTREG32(intel_i810_private.registers, I810_PGETBL_CTL, + agp_bridge.gatt_bus_addr | I810_PGETBL_ENABLED); + CACHE_FLUSH(); + + if (agp_bridge.needs_scratch_page == TRUE) { + for (i = 0; i < current_size->num_entries; i++) { + OUTREG32(intel_i810_private.registers, I810_PTE_BASE + (i * 4), + agp_bridge.scratch_page); + } + } + return 0; +} + +static void intel_i810_cleanup(void) +{ + OUTREG32(intel_i810_private.registers, I810_PGETBL_CTL, 0); + iounmap((void *) intel_i810_private.registers); +} + +static void intel_i810_tlbflush(agp_memory * mem) +{ + return; +} + +static void intel_i810_agp_enable(u32 mode) +{ + return; +} + +static int intel_i810_insert_entries(agp_memory * mem, off_t pg_start, + int type) +{ + int i, j, num_entries; + void *temp; + + temp = agp_bridge.current_size; + num_entries = ((aper_size_info_fixed *) temp)->num_entries; + + if ((pg_start + mem->page_count) > num_entries) { + return -EINVAL; + } + for (j = pg_start; j < (pg_start + mem->page_count); j++) { + if (!PGE_EMPTY(agp_bridge.gatt_table[j])) { + return -EBUSY; + } + } + + if (type != 0 || mem->type != 0) { + if ((type == AGP_DCACHE_MEMORY) && + (mem->type == AGP_DCACHE_MEMORY)) { + /* 
special insert */ + + for (i = pg_start; i < (pg_start + mem->page_count); i++) { + OUTREG32(intel_i810_private.registers, I810_PTE_BASE + (i * 4), + (i * 4096) | I810_PTE_LOCAL | I810_PTE_VALID); + } + + agp_bridge.tlb_flush(mem); + return 0; + } + return -EINVAL; + } + if (mem->is_flushed == FALSE) { + CACHE_FLUSH(); + mem->is_flushed = TRUE; + } + for (i = 0, j = pg_start; i < mem->page_count; i++, j++) { + OUTREG32(intel_i810_private.registers, + I810_PTE_BASE + (j * 4), mem->memory[i]); + } + + agp_bridge.tlb_flush(mem); + return 0; +} + +static int intel_i810_remove_entries(agp_memory * mem, off_t pg_start, + int type) +{ + int i; + + for (i = pg_start; i < (mem->page_count + pg_start); i++) { + OUTREG32(intel_i810_private.registers, I810_PTE_BASE + (i * 4), + agp_bridge.scratch_page); + } + + agp_bridge.tlb_flush(mem); + return 0; +} + +static agp_memory *intel_i810_alloc_by_type(size_t pg_count, int type) +{ + agp_memory *new; + + if (type == AGP_DCACHE_MEMORY) { + if (pg_count != intel_i810_private.num_dcache_entries) { + return NULL; + } + new = agp_create_memory(1); + + if (new == NULL) { + return NULL; + } + new->type = AGP_DCACHE_MEMORY; + new->page_count = pg_count; + new->num_scratch_pages = 0; + vfree(new->memory); + return new; + } + return NULL; +} + +static void intel_i810_free_by_type(agp_memory * curr) +{ + agp_free_key(curr->key); + kfree(curr); +} + +static unsigned long intel_i810_mask_memory(unsigned long addr, int type) +{ + /* Type checking must be done elsewhere */ + return addr | agp_bridge.masks[type].mask; +} + +static void intel_i810_setup(struct pci_dev *i810_dev) +{ + intel_i810_private.i810_dev = i810_dev; + + agp_bridge.masks = intel_i810_masks; + agp_bridge.num_of_masks = 2; + agp_bridge.aperture_sizes = (void *) intel_i810_sizes; + agp_bridge.size_type = FIXED_APER_SIZE; + agp_bridge.num_aperture_sizes = 2; + agp_bridge.dev_private_data = (void *) &intel_i810_private; + agp_bridge.needs_scratch_page = TRUE; + 
agp_bridge.configure = intel_i810_configure; + agp_bridge.fetch_size = intel_i810_fetch_size; + agp_bridge.cleanup = intel_i810_cleanup; + agp_bridge.tlb_flush = intel_i810_tlbflush; + agp_bridge.mask_memory = intel_i810_mask_memory; + agp_bridge.agp_enable = intel_i810_agp_enable; +#ifdef __SMP__ + agp_bridge.cache_flush = smp_flush_cache; +#else + agp_bridge.cache_flush = flush_cache; +#endif + agp_bridge.create_gatt_table = agp_generic_create_gatt_table; + agp_bridge.free_gatt_table = agp_generic_free_gatt_table; + agp_bridge.insert_memory = intel_i810_insert_entries; + agp_bridge.remove_memory = intel_i810_remove_entries; + agp_bridge.alloc_by_type = intel_i810_alloc_by_type; + agp_bridge.free_by_type = intel_i810_free_by_type; +} + +#endif + +#ifdef AGP_BUILD_INTEL_GENERIC + +static int intel_fetch_size(void) +{ + int i; + u16 temp; + aper_size_info_16 *values; + + pci_read_config_word(agp_bridge.dev, INTEL_APSIZE, &temp); + (void *) values = agp_bridge.aperture_sizes; + + for (i = 0; i < agp_bridge.num_aperture_sizes; i++) { + if (temp == values[i].size_value) { + agp_bridge.previous_size = + agp_bridge.current_size = (void *) (values + i); + agp_bridge.aperture_size_idx = i; + return values[i].size; + } + } + + return 0; +} + +static void intel_tlbflush(agp_memory * mem) +{ + pci_write_config_dword(agp_bridge.dev, INTEL_AGPCTRL, 0x2200); + pci_write_config_dword(agp_bridge.dev, INTEL_AGPCTRL, 0x2280); +} + +static void intel_cleanup(void) +{ + u16 temp; + + pci_read_config_word(agp_bridge.dev, INTEL_NBXCFG, &temp); + pci_write_config_word(agp_bridge.dev, INTEL_NBXCFG, temp & ~(1 << 9)); +} + +static int intel_configure(void) +{ + u32 temp; + u16 temp2; + aper_size_info_16 *current_size; + + current_size = (aper_size_info_16 *) agp_bridge.current_size; + + /* aperture size */ + pci_write_config_word(agp_bridge.dev, INTEL_APSIZE, current_size->size_value); + + /* address to map to */ + pci_read_config_dword(agp_bridge.dev, INTEL_APBASE, &temp); + 
agp_bridge.gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK); + + /* attbase - aperture base */ + pci_write_config_dword(agp_bridge.dev, INTEL_ATTBASE, agp_bridge.gatt_bus_addr); + + /* agpctrl */ + pci_write_config_dword(agp_bridge.dev, INTEL_AGPCTRL, 0x2280); + + /* paccfg/nbxcfg */ + pci_read_config_word(agp_bridge.dev, INTEL_NBXCFG, &temp2); + pci_write_config_word(agp_bridge.dev, INTEL_NBXCFG, (temp2 & ~(1 << 10)) | (1 << 9)); + /* clear any possible error conditions */ + pci_write_config_byte(agp_bridge.dev, INTEL_ERRSTS + 1, 7); + return 0; +} + +static unsigned long intel_mask_memory(unsigned long addr, int type) +{ + /* Memory type is ignored */ + + return addr | agp_bridge.masks[0].mask; +} + + +/* Setup function */ +static gatt_mask intel_generic_masks[] = +{ + {0x00000017, 0} +}; + +static aper_size_info_16 intel_generic_sizes[7] = +{ + {256, 65536, 6, 0}, + {128, 32768, 5, 32}, + {64, 16384, 4, 48}, + {32, 8192, 3, 56}, + {16, 4096, 2, 60}, + {8, 2048, 1, 62}, + {4, 1024, 0, 63} +}; + +static void intel_generic_setup(void) +{ + agp_bridge.masks = intel_generic_masks; + agp_bridge.num_of_masks = 1; + agp_bridge.aperture_sizes = (void *) intel_generic_sizes; + agp_bridge.size_type = U16_APER_SIZE; + agp_bridge.num_aperture_sizes = 7; + agp_bridge.dev_private_data = NULL; + agp_bridge.needs_scratch_page = FALSE; + agp_bridge.configure = intel_configure; + agp_bridge.fetch_size = intel_fetch_size; + agp_bridge.cleanup = intel_cleanup; + agp_bridge.tlb_flush = intel_tlbflush; + agp_bridge.mask_memory = intel_mask_memory; + agp_bridge.agp_enable = agp_generic_agp_enable; +#ifdef __SMP__ + agp_bridge.cache_flush = smp_flush_cache; +#else + agp_bridge.cache_flush = flush_cache; +#endif + agp_bridge.create_gatt_table = agp_generic_create_gatt_table; + agp_bridge.free_gatt_table = agp_generic_free_gatt_table; + agp_bridge.insert_memory = agp_generic_insert_memory; + agp_bridge.remove_memory = agp_generic_remove_memory; + agp_bridge.alloc_by_type = 
agp_generic_alloc_by_type; + agp_bridge.free_by_type = agp_generic_free_by_type; +} + +#endif + +#ifdef AGP_BUILD_VIA_GENERIC + +static int via_fetch_size(void) +{ + int i; + u8 temp; + aper_size_info_8 *values; + + (void *) values = agp_bridge.aperture_sizes; + pci_read_config_byte(agp_bridge.dev, VIA_APSIZE, &temp); + for (i = 0; i < agp_bridge.num_aperture_sizes; i++) { + if (temp == values[i].size_value) { + agp_bridge.previous_size = + agp_bridge.current_size = (void *) (values + i); + agp_bridge.aperture_size_idx = i; + return values[i].size; + } + } + + return 0; +} + +static int via_configure(void) +{ + u32 temp; + aper_size_info_8 *current_size; + + current_size = (aper_size_info_8 *) agp_bridge.current_size; + /* aperture size */ + pci_write_config_byte(agp_bridge.dev, VIA_APSIZE, current_size->size_value); + /* address to map to */ + pci_read_config_dword(agp_bridge.dev, VIA_APBASE, &temp); + agp_bridge.gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK); + + /* GART control register */ + pci_write_config_dword(agp_bridge.dev, VIA_GARTCTRL, 0x0000000f); + + /* attbase - aperture GATT base */ + pci_write_config_dword(agp_bridge.dev, VIA_ATTBASE, + (agp_bridge.gatt_bus_addr & 0xfffff000) | 3); + return 0; +} + +static void via_cleanup(void) +{ + aper_size_info_8 *previous_size; + + previous_size = (aper_size_info_8 *) agp_bridge.previous_size; + pci_write_config_dword(agp_bridge.dev, VIA_ATTBASE, 0); + pci_write_config_byte(agp_bridge.dev, VIA_APSIZE, previous_size->size_value); +} + +static void via_tlbflush(agp_memory * mem) +{ + pci_write_config_dword(agp_bridge.dev, VIA_GARTCTRL, 0x0000008f); + pci_write_config_dword(agp_bridge.dev, VIA_GARTCTRL, 0x0000000f); +} + +static unsigned long via_mask_memory(unsigned long addr, int type) +{ + /* Memory type is ignored */ + + return addr | agp_bridge.masks[0].mask; +} + +static aper_size_info_8 via_generic_sizes[7] = +{ + {256, 65536, 6, 0}, + {128, 32768, 5, 128}, + {64, 16384, 4, 192}, + {32, 8192, 3, 224}, 
+ {16, 4096, 2, 240}, + {8, 2048, 1, 248}, + {4, 1024, 0, 252} +}; + +static gatt_mask via_generic_masks[] = +{ + {0x00000000, 0} +}; + +static void via_generic_setup(void) +{ + agp_bridge.masks = via_generic_masks; + agp_bridge.num_of_masks = 1; + agp_bridge.aperture_sizes = (void *) via_generic_sizes; + agp_bridge.size_type = U8_APER_SIZE; + agp_bridge.num_aperture_sizes = 7; + agp_bridge.dev_private_data = NULL; + agp_bridge.needs_scratch_page = FALSE; + agp_bridge.configure = via_configure; + agp_bridge.fetch_size = via_fetch_size; + agp_bridge.cleanup = via_cleanup; + agp_bridge.tlb_flush = via_tlbflush; + agp_bridge.mask_memory = via_mask_memory; + agp_bridge.agp_enable = agp_generic_agp_enable; +#ifdef __SMP__ + agp_bridge.cache_flush = smp_flush_cache; +#else + agp_bridge.cache_flush = flush_cache; +#endif + agp_bridge.create_gatt_table = agp_generic_create_gatt_table; + agp_bridge.free_gatt_table = agp_generic_free_gatt_table; + agp_bridge.insert_memory = agp_generic_insert_memory; + agp_bridge.remove_memory = agp_generic_remove_memory; + agp_bridge.alloc_by_type = agp_generic_alloc_by_type; + agp_bridge.free_by_type = agp_generic_free_by_type; +} + +#endif + +#ifdef AGP_BUILD_SIS_GENERIC + +static int sis_fetch_size(void) +{ + u8 temp_size; + int i; + aper_size_info_8 *values; + + pci_read_config_byte(agp_bridge.dev, SIS_APSIZE, &temp_size); + (void *) values = agp_bridge.aperture_sizes; + for (i = 0; i < agp_bridge.num_aperture_sizes; i++) { + if ((temp_size == values[i].size_value) || + ((temp_size & ~(0x03)) == (values[i].size_value & ~(0x03)))) { + agp_bridge.previous_size = + agp_bridge.current_size = (void *) (values + i); + + agp_bridge.aperture_size_idx = i; + return values[i].size; + } + } + + return 0; +} + + +static void sis_tlbflush(agp_memory * mem) +{ + pci_write_config_byte(agp_bridge.dev, SIS_TLBFLUSH, 0x02); +} + +static int sis_configure(void) +{ + u32 temp; + aper_size_info_8 *current_size; + + current_size = (aper_size_info_8 *) 
agp_bridge.current_size; + pci_write_config_byte(agp_bridge.dev, SIS_TLBCNTRL, 0x05); + pci_read_config_dword(agp_bridge.dev, SIS_APBASE, &temp); + agp_bridge.gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK); + pci_write_config_dword(agp_bridge.dev, SIS_ATTBASE, agp_bridge.gatt_bus_addr); + pci_write_config_byte(agp_bridge.dev, SIS_APSIZE, current_size->size_value); + return 0; +} + +static void sis_cleanup(void) +{ + aper_size_info_8 *previous_size; + + previous_size = (aper_size_info_8 *) agp_bridge.previous_size; + pci_write_config_byte(agp_bridge.dev, SIS_APSIZE, (previous_size->size_value & ~(0x03))); +} + +static unsigned long sis_mask_memory(unsigned long addr, int type) +{ + /* Memory type is ignored */ + + return addr | agp_bridge.masks[0].mask; +} + +static aper_size_info_8 sis_generic_sizes[7] = +{ + {256, 65536, 6, 99}, + {128, 32768, 5, 83}, + {64, 16384, 4, 67}, + {32, 8192, 3, 51}, + {16, 4096, 2, 35}, + {8, 2048, 1, 19}, + {4, 1024, 0, 3} +}; + +static gatt_mask sis_generic_masks[] = +{ + {0x00000000, 0} +}; + +static void sis_generic_setup(void) +{ + agp_bridge.masks = sis_generic_masks; + agp_bridge.num_of_masks = 1; + agp_bridge.aperture_sizes = (void *) sis_generic_sizes; + agp_bridge.size_type = U8_APER_SIZE; + agp_bridge.num_aperture_sizes = 7; + agp_bridge.dev_private_data = NULL; + agp_bridge.needs_scratch_page = FALSE; + agp_bridge.configure = sis_configure; + agp_bridge.fetch_size = sis_fetch_size; + agp_bridge.cleanup = sis_cleanup; + agp_bridge.tlb_flush = sis_tlbflush; + agp_bridge.mask_memory = sis_mask_memory; + agp_bridge.agp_enable = agp_generic_agp_enable; +#ifdef __SMP__ + agp_bridge.cache_flush = smp_flush_cache; +#else + agp_bridge.cache_flush = flush_cache; +#endif + agp_bridge.create_gatt_table = agp_generic_create_gatt_table; + agp_bridge.free_gatt_table = agp_generic_free_gatt_table; + agp_bridge.insert_memory = agp_generic_insert_memory; + agp_bridge.remove_memory = agp_generic_remove_memory; + agp_bridge.alloc_by_type = 
agp_generic_alloc_by_type; + agp_bridge.free_by_type = agp_generic_free_by_type; +} + +#endif + +#ifdef AGP_BUILD_AMD_IRONGATE + +static struct _amd_irongate_private { + volatile unsigned char *registers; +} amd_irongate_private; + +static int amd_irongate_fetch_size(void) +{ + int i; + u32 temp; + aper_size_info_32 *values; + + pci_read_config_dword(agp_bridge.dev, AMD_APSIZE, &temp); + temp = (temp & 0x0000000e); + (void *) values = agp_bridge.aperture_sizes; + for (i = 0; i < agp_bridge.num_aperture_sizes; i++) { + if (temp == values[i].size_value) { + agp_bridge.previous_size = + agp_bridge.current_size = (void *) (values + i); + + agp_bridge.aperture_size_idx = i; + return values[i].size; + } + } + + return 0; +} + +static int amd_irongate_configure(void) +{ + aper_size_info_32 *current_size; + u32 temp; + u16 enable_reg; + + current_size = (aper_size_info_32 *) agp_bridge.current_size; + + /* Get the memory mapped registers */ + pci_read_config_dword(agp_bridge.dev, AMD_MMBASE, &temp); + temp = (temp & PCI_BASE_ADDRESS_MEM_MASK); + amd_irongate_private.registers = (volatile unsigned char *) ioremap(temp, 4096); + + /* Write out the address of the gatt table */ + OUTREG32(amd_irongate_private.registers, AMD_ATTBASE, agp_bridge.gatt_bus_addr); + + /* Write the Sync register */ + pci_write_config_byte(agp_bridge.dev, AMD_MODECNTL, 0x80); + + /* Write the enable register */ + enable_reg = INREG16(amd_irongate_private.registers, AMD_GARTENABLE); + enable_reg = (enable_reg | 0x0004); + OUTREG16(amd_irongate_private.registers, AMD_GARTENABLE, enable_reg); + + /* Write out the size register */ + pci_read_config_dword(agp_bridge.dev, AMD_APSIZE, &temp); + temp = (((temp & ~(0x0000000e)) | current_size->size_value) | 0x00000001); + pci_write_config_dword(agp_bridge.dev, AMD_APSIZE, temp); + + /* Flush the tlb */ + OUTREG32(amd_irongate_private.registers, AMD_TLBFLUSH, 0x00000001); + + /* Get the address for the gart region */ + pci_read_config_dword(agp_bridge.dev, 
AMD_APBASE, &temp); + temp = (temp & PCI_BASE_ADDRESS_MEM_MASK); + agp_bridge.gart_bus_addr = temp; + return 0; +} + +static void amd_irongate_cleanup(void) +{ + aper_size_info_32 *previous_size; + u32 temp; + u16 enable_reg; + + previous_size = (aper_size_info_32 *) agp_bridge.previous_size; + + enable_reg = INREG16(amd_irongate_private.registers, AMD_GARTENABLE); + enable_reg = (enable_reg & ~(0x0004)); + OUTREG16(amd_irongate_private.registers, AMD_GARTENABLE, enable_reg); + + /* Write back the previous size and disable gart translation */ + pci_read_config_dword(agp_bridge.dev, AMD_APSIZE, &temp); + temp = ((temp & ~(0x0000000f)) | previous_size->size_value); + pci_write_config_dword(agp_bridge.dev, AMD_APSIZE, temp); + iounmap((void *) amd_irongate_private.registers); +} + +/* + * This routine could be implemented by taking the addresses + * written to the GATT, and flushing them individually. However + * currently it just flushes the whole table. Which is probably + * more efficient, since agp_memory blocks can be a large number of + * entries. 
+ */ + +static void amd_irongate_tlbflush(agp_memory * temp) +{ + OUTREG32(amd_irongate_private.registers, AMD_TLBFLUSH, 0x00000001); +} + +static unsigned long amd_irongate_mask_memory(unsigned long addr, int type) +{ + /* Only type 0 is supported by the irongate */ + + return addr | agp_bridge.masks[0].mask; +} + +static aper_size_info_32 amd_irongate_sizes[7] = +{ + {2048, 524288, 9, 0x0000000c}, + {1024, 262144, 8, 0x0000000a}, + {512, 131072, 7, 0x00000008}, + {256, 65536, 6, 0x00000006}, + {128, 32768, 5, 0x00000004}, + {64, 16384, 4, 0x00000002}, + {32, 8192, 3, 0x00000000} +}; + +static gatt_mask amd_irongate_masks[] = +{ + {0x00000001, 0} +}; + +static void amd_irongate_setup(void) +{ + agp_bridge.masks = amd_irongate_masks; + agp_bridge.num_of_masks = 1; + agp_bridge.aperture_sizes = (void *) amd_irongate_sizes; + agp_bridge.size_type = U32_APER_SIZE; + agp_bridge.num_aperture_sizes = 7; + agp_bridge.dev_private_data = (void *) &amd_irongate_private; + agp_bridge.needs_scratch_page = FALSE; + agp_bridge.configure = amd_irongate_configure; + agp_bridge.fetch_size = amd_irongate_fetch_size; + agp_bridge.cleanup = amd_irongate_cleanup; + agp_bridge.tlb_flush = amd_irongate_tlbflush; + agp_bridge.mask_memory = amd_irongate_mask_memory; + agp_bridge.agp_enable = agp_generic_agp_enable; +#ifdef __SMP__ + agp_bridge.cache_flush = smp_flush_cache; +#else + agp_bridge.cache_flush = flush_cache; +#endif + agp_bridge.create_gatt_table = agp_generic_create_gatt_table; + agp_bridge.free_gatt_table = agp_generic_free_gatt_table; + agp_bridge.insert_memory = agp_generic_insert_memory; + agp_bridge.remove_memory = agp_generic_remove_memory; + agp_bridge.alloc_by_type = agp_generic_alloc_by_type; + agp_bridge.free_by_type = agp_generic_free_by_type; +} + +#endif + +#ifdef AGP_BUILD_ALI_M1541 + +static int ali_fetch_size(void) +{ + int i; + u32 temp; + aper_size_info_32 *values; + + pci_read_config_dword(agp_bridge.dev, ALI_ATTBASE, &temp); + temp &= ~(0xfffffff0); + (void 
*) values = agp_bridge.aperture_sizes; + + for (i = 0; i < agp_bridge.num_aperture_sizes; i++) { + if (temp == values[i].size_value) { + agp_bridge.previous_size = + agp_bridge.current_size = (void *) (values + i); + agp_bridge.aperture_size_idx = i; + return values[i].size; + } + } + + return 0; +} + +static void ali_tlbflush(agp_memory * mem) +{ + u32 temp; + + pci_read_config_dword(agp_bridge.dev, ALI_TLBCTRL, &temp); + pci_write_config_dword(agp_bridge.dev, ALI_TLBCTRL, + ((temp & 0xffffff00) | 0x00000090)); + pci_write_config_dword(agp_bridge.dev, ALI_TLBCTRL, + ((temp & 0xffffff00) | 0x00000010)); +} + +static void ali_cleanup(void) +{ + aper_size_info_32 *previous_size; + u32 temp; + + previous_size = (aper_size_info_32 *) agp_bridge.previous_size; + + pci_read_config_dword(agp_bridge.dev, ALI_TLBCTRL, &temp); + pci_write_config_dword(agp_bridge.dev, ALI_TLBCTRL, + ((temp & 0xffffff00) | 0x00000090)); + pci_write_config_dword(agp_bridge.dev, ALI_ATTBASE, previous_size->size_value); +} + +static int ali_configure(void) +{ + u32 temp; + aper_size_info_32 *current_size; + + current_size = (aper_size_info_32 *) agp_bridge.current_size; + + /* aperture size and gatt addr */ + pci_write_config_dword(agp_bridge.dev, ALI_ATTBASE, + agp_bridge.gatt_bus_addr | current_size->size_value); + + /* tlb control */ + pci_read_config_dword(agp_bridge.dev, ALI_TLBCTRL, &temp); + pci_write_config_dword(agp_bridge.dev, ALI_TLBCTRL, + ((temp & 0xffffff00) | 0x00000010)); + + /* address to map to */ + pci_read_config_dword(agp_bridge.dev, ALI_APBASE, &temp); + agp_bridge.gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK); + return 0; +} + +static unsigned long ali_mask_memory(unsigned long addr, int type) +{ + /* Memory type is ignored */ + + return addr | agp_bridge.masks[0].mask; +} + + +/* Setup function */ +static gatt_mask ali_generic_masks[] = +{ + {0x00000000, 0} +}; + +static aper_size_info_32 ali_generic_sizes[7] = +{ + {256, 65536, 6, 10}, + {128, 32768, 5, 9}, + {64, 
16384, 4, 8}, + {32, 8192, 3, 7}, + {16, 4096, 2, 6}, + {8, 2048, 1, 4}, + {4, 1024, 0, 3} +}; + +static void ali_generic_setup(void) +{ + agp_bridge.masks = ali_generic_masks; + agp_bridge.num_of_masks = 1; + agp_bridge.aperture_sizes = (void *) ali_generic_sizes; + agp_bridge.size_type = U32_APER_SIZE; + agp_bridge.num_aperture_sizes = 7; + agp_bridge.dev_private_data = NULL; + agp_bridge.needs_scratch_page = FALSE; + agp_bridge.configure = ali_configure; + agp_bridge.fetch_size = ali_fetch_size; + agp_bridge.cleanup = ali_cleanup; + agp_bridge.tlb_flush = ali_tlbflush; + agp_bridge.mask_memory = ali_mask_memory; + agp_bridge.agp_enable = agp_generic_agp_enable; +#ifdef __SMP__ + agp_bridge.cache_flush = smp_flush_cache; +#else + agp_bridge.cache_flush = flush_cache; +#endif + agp_bridge.create_gatt_table = agp_generic_create_gatt_table; + agp_bridge.free_gatt_table = agp_generic_free_gatt_table; + agp_bridge.insert_memory = agp_generic_insert_memory; + agp_bridge.remove_memory = agp_generic_remove_memory; + agp_bridge.alloc_by_type = agp_generic_alloc_by_type; + agp_bridge.free_by_type = agp_generic_free_by_type; +} + +#endif + + + +/* Supported Device Scanning routine */ + +static void agp_find_supported_device(void) +{ + struct pci_dev *dev = NULL; + u8 cap_ptr = 0x00; + u32 cap_id, scratch; + + if ((dev = pci_find_class(PCI_CLASS_BRIDGE_HOST << 8, NULL)) == NULL) { + agp_bridge.type = NOT_SUPPORTED; + return; + } + agp_bridge.dev = dev; + + /* Need to test for I810 here */ +#ifdef AGP_BUILD_INTEL_I810 + if (dev->vendor == PCI_VENDOR_ID_INTEL) { + struct pci_dev *i810_dev; + + switch (dev->device) { + case PCI_DEVICE_ID_INTEL_810_0: + i810_dev = pci_find_device(PCI_VENDOR_ID_INTEL, + PCI_DEVICE_ID_INTEL_810_1, + NULL); + if (i810_dev == NULL) { + printk("agpgart: Detected an Intel i810, but could not find the secondary device.\n"); + agp_bridge.type = NOT_SUPPORTED; + return; + } + printk("agpgart: Detected an Intel i810 Chipset.\n"); + agp_bridge.type = 
INTEL_I810; + agp_bridge.intel_i810_setup(i810_dev); + return; + + case PCI_DEVICE_ID_INTEL_810_DC100_0: + i810_dev = pci_find_device(PCI_VENDOR_ID_INTEL, + PCI_DEVICE_ID_INTEL_810_DC100_1, + NULL); + if (i810_dev == NULL) { + printk("agpgart: Detected an Intel i810 DC100, but could not find the secondary device.\n"); + agp_bridge.type = NOT_SUPPORTED; + return; + } + printk("agpgart: Detected an Intel i810 DC100 Chipset.\n"); + agp_bridge.type = INTEL_I810; + agp_bridge.intel_i810_setup(i810_dev); + return; + + case PCI_DEVICE_ID_INTEL_810_E_0: + i810_dev = pci_find_device(PCI_VENDOR_ID_INTEL, + PCI_DEVICE_ID_INTEL_810_E_1, + NULL); + if (i810_dev == NULL) { + printk("agpgart: Detected an Intel i810 E, but could not find the secondary device.\n"); + agp_bridge.type = NOT_SUPPORTED; + return; + } + printk("agpgart: Detected an Intel i810 E Chipset.\n"); + agp_bridge.type = INTEL_I810; + agp_bridge.intel_i810_setup(i810_dev); + return; + default: + break; + } + } +#endif + /* find capndx */ + pci_read_config_dword(dev, 0x04, &scratch); + + if (!(scratch & 0x00100000)) { + agp_bridge.type = NOT_SUPPORTED; + return; + } + pci_read_config_byte(dev, 0x34, &cap_ptr); + + if (cap_ptr != 0x00) { + do { + pci_read_config_dword(dev, cap_ptr, &cap_id); + + if ((cap_id & 0xff) != 0x02) + cap_ptr = (cap_id >> 8) & 0xff; + } + while (((cap_id & 0xff) != 0x02) && (cap_ptr != 0x00)); + } + if (cap_ptr == 0x00) { + agp_bridge.type = NOT_SUPPORTED; + return; + } + agp_bridge.capndx = cap_ptr; + + /* Fill in the mode register */ + pci_read_config_dword(agp_bridge.dev, + agp_bridge.capndx + 4, + &agp_bridge.mode); + + switch (dev->vendor) { +#ifdef AGP_BUILD_INTEL_GENERIC + case PCI_VENDOR_ID_INTEL: + switch (dev->device) { + case PCI_DEVICE_ID_INTEL_82443LX_0: + agp_bridge.type = INTEL_LX; + printk("agpgart: Detected an Intel 440LX Chipset.\n"); + agp_bridge.intel_generic_setup(); + return; + + case PCI_DEVICE_ID_INTEL_82443BX_0: + agp_bridge.type = INTEL_BX; + printk("agpgart: 
Detected an Intel 440BX Chipset.\n"); + agp_bridge.intel_generic_setup(); + return; + + case PCI_DEVICE_ID_INTEL_82443GX_0: + agp_bridge.type = INTEL_GX; + printk("agpgart: Detected an Intel 440GX Chipset.\n"); + agp_bridge.intel_generic_setup(); + return; + + default: + if (agp_try_unsupported != 0) { + printk("agpgart: Trying generic intel routines for device id: %x\n", dev->device); + agp_bridge.type = INTEL_GENERIC; + agp_bridge.intel_generic_setup(); + return; + } else { + printk("agpgart: Unsupported intel chipset, you might want to try agp_try_unsupported=1.\n"); + agp_bridge.type = NOT_SUPPORTED; + return; + } + } + break; +#endif + +#ifdef AGP_BUILD_VIA_GENERIC + case PCI_VENDOR_ID_VIA: + switch (dev->device) { + case PCI_DEVICE_ID_VIA_82C597_0: + agp_bridge.type = VIA_VP3; + printk("agpgart: Detected a VIA VP3 Chipset.\n"); + agp_bridge.via_generic_setup(); + return; + + case PCI_DEVICE_ID_VIA_82C598_0: + agp_bridge.type = VIA_MVP3; + printk("agpgart: Detected a VIA MVP3 Chipset.\n"); + agp_bridge.via_generic_setup(); + return; + + case PCI_DEVICE_ID_VIA_82C691_0: + agp_bridge.type = VIA_APOLLO_PRO; + printk("agpgart: Detected a VIA Apollo Pro Chipset.\n"); + agp_bridge.via_generic_setup(); + return; + + default: + if (agp_try_unsupported != 0) { + printk("agpgart: Trying generic VIA routines for device id: %x\n", dev->device); + agp_bridge.type = VIA_GENERIC; + agp_bridge.via_generic_setup(); + return; + } else { + printk("agpgart: Unsupported VIA chipset, you might want to try agp_try_unsupported=1.\n"); + agp_bridge.type = NOT_SUPPORTED; + return; + } + } + break; +#endif + +#ifdef AGP_BUILD_SIS_GENERIC + case PCI_VENDOR_ID_SI: + switch (dev->device) { + /* ToDo need to find out the specific devices supported */ + default: + if (agp_try_unsupported != 0) { + printk("agpgart: Trying generic SiS routines for device id: %x\n", dev->device); + agp_bridge.type = SIS_GENERIC; + agp_bridge.sis_generic_setup(); + return; + } else { + printk("agpgart: 
Unsupported SiS chipset, you might want to try agp_try_unsupported=1.\n"); + agp_bridge.type = NOT_SUPPORTED; + return; + } + } + break; +#endif + +#ifdef AGP_BUILD_AMD_IRONGATE + case PCI_VENDOR_ID_AMD: + switch (dev->device) { + case PCI_DEVICE_ID_AMD_IRONGATE_0: + agp_bridge.type = AMD_IRONGATE; + printk("agpgart: Detected an AMD Irongate Chipset.\n"); + agp_bridge.amd_irongate_setup(); + return; + + default: + if (agp_try_unsupported != 0) { + printk("agpgart: Trying Amd irongate routines for device id: %x\n", dev->device); + agp_bridge.type = AMD_GENERIC; + agp_bridge.amd_irongate_setup(); + return; + } else { + printk("agpgart: Unsupported Amd chipset, you might want to try agp_try_unsupported=1.\n"); + agp_bridge.type = NOT_SUPPORTED; + return; + } + } + break; +#endif + +#ifdef AGP_BUILD_ALI_M1541 + case PCI_VENDOR_ID_AL: + switch (dev->device) { + case PCI_DEVICE_ID_AL_M1541_0: + agp_bridge.type = ALI_M1541; + printk("agpgart: Detected an ALi M1541 Chipset\n"); + agp_bridge.ali_generic_setup(); + return; + default: + if (agp_try_unsupported != 0) { + printk("agpgart: Trying ALi generic routines for device id: %x\n", dev->device); + agp_bridge.type = ALI_GENERIC; + agp_bridge.ali_generic_setup(); + return; + } else { + printk("agpgart: Unsupported ALi chipset, you might want to type agp_try_unsupported=1.\n"); + agp_bridge.type = NOT_SUPPORTED; + return; + } + } + break; +#endif + default: + agp_bridge.type = NOT_SUPPORTED; + return; + } +} + +struct agp_max_table { + int mem; + int agp; +}; + +static struct agp_max_table agp_maxes_table[9] = +{ + {0, 0}, + {32, 4}, + {64, 28}, + {128, 96}, + {256, 204}, + {512, 440}, + {1024, 942}, + {2048, 1920}, + {4096, 3932} +}; + +static int agp_find_max(void) +{ + int memory; + float t; + int index; + int result; + + memory = virt_to_phys(high_memory) / 0x100000; + index = 0; + + while ((memory > agp_maxes_table[index].mem) && + (index < 8)) { + index++; + } + + t = (memory - agp_maxes_table[index - 1].mem) / + 
(agp_maxes_table[index].mem - agp_maxes_table[index - 1].mem); + + result = agp_maxes_table[index - 1].agp + + (t * (agp_maxes_table[index].agp - agp_maxes_table[index - 1].agp)); + + printk("agpgart: Maximum main memory to use for agp memory: %dM\n", result); + result = (result * 0x100000) / 4096; + return result; +} + +#define AGPGART_VERSION_MAJOR 0 +#define AGPGART_VERSION_MINOR 99 + +static agp_version agp_current_version = +{ + AGPGART_VERSION_MAJOR, + AGPGART_VERSION_MINOR +}; + +static int agp_backend_initialize(void) +{ + int size_value; + + memset(&agp_bridge, 0, sizeof(struct agp_bridge_data)); + agp_bridge.type = NOT_SUPPORTED; +#ifdef AGP_BUILD_INTEL_GENERIC + agp_bridge.intel_generic_setup = intel_generic_setup; +#endif +#ifdef AGP_BUILD_INTEL_I810 + agp_bridge.intel_i810_setup = intel_i810_setup; +#endif +#ifdef AGP_BUILD_VIA_GENERIC + agp_bridge.via_generic_setup = via_generic_setup; +#endif +#ifdef AGP_BUILD_SIS_GENERIC + agp_bridge.sis_generic_setup = sis_generic_setup; +#endif +#ifdef AGP_BUILD_AMD_IRONGATE + agp_bridge.amd_irongate_setup = amd_irongate_setup; +#endif +#ifdef AGP_BUILD_ALI_M1541 + agp_bridge.ali_generic_setup = ali_generic_setup; +#endif + agp_bridge.max_memory_agp = agp_find_max(); + agp_bridge.version = &agp_current_version; + agp_find_supported_device(); + + if (agp_bridge.needs_scratch_page == TRUE) { + agp_bridge.scratch_page = (unsigned long) agp_alloc_page(); + + if ((void *) (agp_bridge.scratch_page) == NULL) { + printk("agpgart: unable to get memory for scratch page.\n"); + return -ENOMEM; + } + agp_bridge.scratch_page = virt_to_phys((void *) agp_bridge.scratch_page); + agp_bridge.scratch_page = agp_bridge.mask_memory(agp_bridge.scratch_page, 0); + } + if (agp_bridge.type == NOT_SUPPORTED) { + printk("agpgart: no supported devices found.\n"); + return -EINVAL; + } + size_value = agp_bridge.fetch_size(); + + if (size_value == 0) { + printk("agpgart: unable to detrimine aperture size.\n"); + return -EINVAL; + } + if 
(agp_bridge.create_gatt_table()) { + printk("agpgart: unable to get memory for graphics translation table.\n"); + return -ENOMEM; + } + agp_bridge.key_list = vmalloc(PAGE_SIZE * 4); + + if (agp_bridge.key_list == NULL) { + printk("agpgart: error allocating memory for key lists.\n"); + agp_bridge.free_gatt_table(); + return -ENOMEM; + } + memset(agp_bridge.key_list, 0, PAGE_SIZE * 4); + + if (agp_bridge.configure()) { + printk("agpgart: error configuring host chipset.\n"); + agp_bridge.free_gatt_table(); + vfree(agp_bridge.key_list); + return -EINVAL; + } + printk("agpgart: Physical address of the agp aperture: 0x%lx\n", agp_bridge.gart_bus_addr); + printk("agpgart: Agp aperture is %dM in size.\n", size_value); + return 0; +} + +static void agp_backend_cleanup(void) +{ + agp_bridge.cleanup(); + agp_bridge.free_gatt_table(); + vfree(agp_bridge.key_list); + + if (agp_bridge.needs_scratch_page == TRUE) { + agp_bridge.scratch_page &= ~(0x00000fff); + agp_destroy_page((void *) phys_to_virt(agp_bridge.scratch_page)); + } +} + +extern int agp_frontend_initialize(void); +extern void agp_frontend_cleanup(void); + +#ifdef MODULE +int init_module(void) +{ + int ret_val; + + printk("Linux agpgart interface v%d.%d (c) Jeff Hartmann\n", + AGPGART_VERSION_MAJOR, AGPGART_VERSION_MINOR); + ret_val = agp_backend_initialize(); + + if (ret_val != 0) { + return ret_val; + } + ret_val = agp_frontend_initialize(); + + if (ret_val != 0) { + agp_backend_cleanup(); + return ret_val; + } + return 0; +} + +void cleanup_module(void) +{ + agp_frontend_cleanup(); + agp_backend_cleanup(); +} + +#endif --- linux/drivers/char/agp/agp_backendP.h.newagpdist Fri Feb 11 14:50:45 2000 +++ linux/drivers/char/agp/agp_backendP.h Fri Feb 11 14:50:45 2000 @@ -0,0 +1,244 @@ +/* + * AGPGART module version 0.99 + * Copyright (C) 1999 Jeff Hartmann + * Copyright (C) 1999 Precision Insight + * Copyright (C) 1999 Xi Graphics + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of 
this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * JEFF HARTMANN, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE + * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef _AGP_BACKEND_PRIV_H +#define _AGP_BACKEND_PRIV_H 1 + +enum aper_size_type { + U8_APER_SIZE, + U16_APER_SIZE, + U32_APER_SIZE, + FIXED_APER_SIZE +}; + +typedef struct _gatt_mask { + unsigned long mask; + u32 type; + /* totally device specific, for integrated chipsets that + * might have different types of memory masks. 
For other + * devices this will probably be ignored */ +} gatt_mask; + +typedef struct _aper_size_info_8 { + int size; + int num_entries; + int page_order; + u8 size_value; +} aper_size_info_8; + +typedef struct _aper_size_info_16 { + int size; + int num_entries; + int page_order; + u16 size_value; +} aper_size_info_16; + +typedef struct _aper_size_info_32 { + int size; + int num_entries; + int page_order; + u32 size_value; +} aper_size_info_32; + +typedef struct _aper_size_info_fixed { + int size; + int num_entries; + int page_order; +} aper_size_info_fixed; + +struct agp_bridge_data { + agp_version *version; + void *aperture_sizes; + void *previous_size; + void *current_size; + void *dev_private_data; + struct pci_dev *dev; + gatt_mask *masks; + unsigned long *gatt_table; + unsigned long *gatt_table_real; + unsigned long scratch_page; + unsigned long gart_bus_addr; + unsigned long gatt_bus_addr; + u32 mode; + enum chipset_type type; + enum aper_size_type size_type; + u32 *key_list; + atomic_t current_memory_agp; + atomic_t agp_in_use; + int max_memory_agp; /* in number of pages */ + int needs_scratch_page; + int aperture_size_idx; + int num_aperture_sizes; + int num_of_masks; + int capndx; + + /* Links to driver specific functions */ + + int (*fetch_size) (void); /* returns the index into the size table */ + int (*configure) (void); + void (*agp_enable) (u32); + void (*cleanup) (void); + void (*tlb_flush) (agp_memory *); + unsigned long (*mask_memory) (unsigned long, int); + void (*cache_flush) (void); + int (*create_gatt_table) (void); + int (*free_gatt_table) (void); + int (*insert_memory) (agp_memory *, off_t, int); + int (*remove_memory) (agp_memory *, off_t, int); + agp_memory *(*alloc_by_type) (size_t, int); + void (*free_by_type) (agp_memory *); + + /* Links to vendor/device specific setup functions */ +#ifdef AGP_BUILD_INTEL_GENERIC + void (*intel_generic_setup) (void); +#endif +#ifdef AGP_BUILD_INTEL_I810 + void (*intel_i810_setup) (struct pci_dev *); 
+#endif +#ifdef AGP_BUILD_VIA_GENERIC + void (*via_generic_setup) (void); +#endif +#ifdef AGP_BUILD_SIS_GENERIC + void (*sis_generic_setup) (void); +#endif +#ifdef AGP_BUILD_AMD_IRONGATE + void (*amd_irongate_setup) (void); +#endif +#ifdef AGP_BUILD_ALI_M1541 + void (*ali_generic_setup) (void); +#endif +}; + +#define OUTREG32(mmap, addr, val) *(volatile u32 *)(mmap + (addr)) = (val) +#define OUTREG16(mmap, addr, val) *(volatile u16 *)(mmap + (addr)) = (val) +#define OUTREG8 (mmap, addr, val) *(volatile u8 *) (mmap + (addr)) = (val) + +#define INREG32(mmap, addr) *(volatile u32 *)(mmap + (addr)) +#define INREG16(mmap, addr) *(volatile u16 *)(mmap + (addr)) +#define INREG8 (mmap, addr) *(volatile u8 *) (mmap + (addr)) + +#ifndef min +#define min(a,b) (((a)<(b))?(a):(b)) +#endif + +#define PGE_EMPTY(p) (!(p) || (p) == (unsigned long) agp_bridge.scratch_page) + +#ifndef PCI_DEVICE_ID_VIA_82C691_0 +#define PCI_DEVICE_ID_VIA_82C691_0 0x0691 +#endif +#ifndef PCI_DEVICE_ID_VIA_82C691_1 +#define PCI_DEVICE_ID_VIA_82C691_1 0x8691 +#endif +#ifndef PCI_DEVICE_ID_INTEL_810_0 +#define PCI_DEVICE_ID_INTEL_810_0 0x7120 +#endif +#ifndef PCI_DEVICE_ID_INTEL_810_DC100_0 +#define PCI_DEVICE_ID_INTEL_810_DC100_0 0x7122 +#endif +#ifndef PCI_DEVICE_ID_INTEL_810_E_0 +#define PCI_DEVICE_ID_INTEL_810_E_0 0x7124 +#endif +#ifndef PCI_DEVICE_ID_INTEL_82443GX_0 +#define PCI_DEVICE_ID_INTEL_82443GX_0 0x71a0 +#endif +#ifndef PCI_DEVICE_ID_INTEL_810_1 +#define PCI_DEVICE_ID_INTEL_810_1 0x7121 +#endif +#ifndef PCI_DEVICE_ID_INTEL_810_DC100_1 +#define PCI_DEVICE_ID_INTEL_810_DC100_1 0x7123 +#endif +#ifndef PCI_DEVICE_ID_INTEL_810_E_1 +#define PCI_DEVICE_ID_INTEL_810_E_1 0x7125 +#endif +#ifndef PCI_DEVICE_ID_INTEL_82443GX_1 +#define PCI_DEVICE_ID_INTEL_82443GX_1 0x71a1 +#endif +#ifndef PCI_DEVICE_ID_AMD_IRONGATE_0 +#define PCI_DEVICE_ID_AMD_IRONGATE_0 0x7006 +#endif +#ifndef PCI_VENDOR_ID_AL +#define PCI_VENDOR_ID_AL 0x10b9 +#endif +#ifndef PCI_DEVICE_ID_AL_M1541_0 +#define PCI_DEVICE_ID_AL_M1541_0 
0x1541 +#endif + +/* intel register */ +#define INTEL_APBASE 0x10 +#define INTEL_APSIZE 0xb4 +#define INTEL_ATTBASE 0xb8 +#define INTEL_AGPCTRL 0xb0 +#define INTEL_NBXCFG 0x50 +#define INTEL_ERRSTS 0x91 + +/* intel i810 registers */ +#define I810_GMADDR 0x10 +#define I810_MMADDR 0x14 +#define I810_PTE_BASE 0x10000 +#define I810_PTE_MAIN_UNCACHED 0x00000000 +#define I810_PTE_LOCAL 0x00000002 +#define I810_PTE_VALID 0x00000001 +#define I810_SMRAM_MISCC 0x70 +#define I810_GFX_MEM_WIN_SIZE 0x00010000 +#define I810_GFX_MEM_WIN_32M 0x00010000 +#define I810_GMS 0x000000c0 +#define I810_GMS_DISABLE 0x00000000 +#define I810_PGETBL_CTL 0x2020 +#define I810_PGETBL_ENABLED 0x00000001 +#define I810_DRAM_CTL 0x3000 +#define I810_DRAM_ROW_0 0x00000001 +#define I810_DRAM_ROW_0_SDRAM 0x00000001 + +/* VIA register */ +#define VIA_APBASE 0x10 +#define VIA_GARTCTRL 0x80 +#define VIA_APSIZE 0x84 +#define VIA_ATTBASE 0x88 + +/* SiS registers */ +#define SIS_APBASE 0x10 +#define SIS_ATTBASE 0x90 +#define SIS_APSIZE 0x94 +#define SIS_TLBCNTRL 0x97 +#define SIS_TLBFLUSH 0x98 + +/* AMD registers */ +#define AMD_APBASE 0x10 +#define AMD_MMBASE 0x14 +#define AMD_APSIZE 0xac +#define AMD_MODECNTL 0xb0 +#define AMD_GARTENABLE 0x02 /* In mmio region (16-bit register) */ +#define AMD_ATTBASE 0x04 /* In mmio region (32-bit register) */ +#define AMD_TLBFLUSH 0x0c /* In mmio region (32-bit register) */ +#define AMD_CACHEENTRY 0x10 /* In mmio region (32-bit register) */ + +/* ALi registers */ +#define ALI_APBASE 0x10 +#define ALI_AGPCTRL 0xb8 +#define ALI_ATTBASE 0xbc +#define ALI_TLBCTRL 0xc0 + +#endif /* _AGP_BACKEND_PRIV_H */ --- linux/drivers/char/agp/agpgart_fe.c.newagpdist Fri Feb 11 14:50:45 2000 +++ linux/drivers/char/agp/agpgart_fe.c Fri Feb 11 14:50:45 2000 @@ -0,0 +1,1087 @@ +/* + * AGPGART module frontend version 0.99 + * Copyright (C) 1999 Jeff Hartmann + * Copyright (C) 1999 Precision Insight + * Copyright (C) 1999 Xi Graphics + * + * Permission is hereby granted, free of charge, to any 
person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * JEFF HARTMANN, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE + * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#define __NO_VERSION__ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static struct agp_front_data agp_fe; + +static agp_memory *agp_find_mem_by_key(int key) +{ + agp_memory *curr; + + if (agp_fe.current_controller == NULL) { + return NULL; + } + curr = agp_fe.current_controller->pool; + + while (curr != NULL) { + if (curr->key == key) { + return curr; + } + curr = curr->next; + } + + return NULL; +} + +static void agp_remove_from_pool(agp_memory * temp) +{ + agp_memory *prev; + agp_memory *next; + + /* Check to see if this is even in the memory pool */ + + if (agp_find_mem_by_key(temp->key) != NULL) { + next = temp->next; + prev = temp->prev; + + if (prev != NULL) { + prev->next = next; + if (next != NULL) { + next->prev = prev; + } + } else { + /* This is the first item on the list */ + if (next != NULL) { + next->prev = NULL; + } + agp_fe.current_controller->pool = next; + } + } +} + +/* + * Routines for managing each client's segment list - + * These routines handle adding and removing segments + * to each auth'ed client. 
+ */ + +static agp_segment_priv *agp_find_seg_in_client(const agp_client * client, + unsigned long offset, + int size, pgprot_t page_prot) +{ + agp_segment_priv *seg; + int num_segments, pg_start, pg_count, i; + + pg_start = offset / 4096; + pg_count = size / 4096; + seg = *(client->segments); + num_segments = client->num_segments; + + for (i = 0; i < client->num_segments; i++) { + if ((seg[i].pg_start == pg_start) && + (seg[i].pg_count == pg_count) && + (pgprot_val(seg[i].prot) == pgprot_val(page_prot))) { + return seg + i; + } + } + + return NULL; +} + +static void agp_remove_seg_from_client(agp_client * client) +{ + if (client->segments != NULL) { + if (*(client->segments) != NULL) { + kfree(*(client->segments)); + } + kfree(client->segments); + } +} + +static void agp_add_seg_to_client(agp_client * client, + agp_segment_priv ** seg, int num_segments) +{ + agp_segment_priv **prev_seg; + + prev_seg = client->segments; + + if (prev_seg != NULL) { + agp_remove_seg_from_client(client); + } + client->num_segments = num_segments; + client->segments = seg; +} + +/* Originally taken from linux/mm/mmap.c from the array + * protection_map. 
+ * The original really should be exported to modules, or + * some routine which does the conversion for you + */ + +static const pgprot_t my_protect_map[16] = +{ + __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111, + __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111 +}; + +static pgprot_t agp_convert_mmap_flags(int prot) +{ +#define _trans(x,bit1,bit2) \ +((bit1==bit2)?(x&bit1):(x&bit1)?bit2:0) + + unsigned long prot_bits; + pgprot_t temp; + + prot_bits = _trans(prot, PROT_READ, VM_READ) | + _trans(prot, PROT_WRITE, VM_WRITE) | + _trans(prot, PROT_EXEC, VM_EXEC); + + prot_bits |= VM_SHARED; + + temp = my_protect_map[prot_bits & 0x0000000f]; + + return temp; +} + +static int agp_create_segment(agp_client * client, agp_region * region) +{ + agp_segment_priv **ret_seg; + agp_segment_priv *seg; + agp_segment *user_seg; + int i; + + seg = kmalloc((sizeof(agp_segment_priv) * region->seg_count), GFP_KERNEL); + if (seg == NULL) { + kfree(region->seg_list); + return -ENOMEM; + } + memset(seg, 0, (sizeof(agp_segment_priv) * region->seg_count)); + user_seg = region->seg_list; + + for (i = 0; i < region->seg_count; i++) { + seg[i].pg_start = user_seg[i].pg_start; + seg[i].pg_count = user_seg[i].pg_count; + seg[i].prot = agp_convert_mmap_flags(user_seg[i].prot); + } + ret_seg = kmalloc(sizeof(void *), GFP_KERNEL); + if (ret_seg == NULL) { + kfree(region->seg_list); + kfree(seg); + return -ENOMEM; + } + *ret_seg = seg; + kfree(region->seg_list); + agp_add_seg_to_client(client, ret_seg, region->seg_count); + return 0; +} + +/* End - Routines for managing each client's segment list */ + +/* This function must only be called when current_controller != NULL */ +static void agp_insert_into_pool(agp_memory * temp) +{ + agp_memory *prev; + + prev = agp_fe.current_controller->pool; + + if (prev != NULL) { + prev->prev = temp; + temp->next = prev; + } + agp_fe.current_controller->pool = temp; +} + + +/* File private list routines */ + +agp_file_private 
*agp_find_private(pid_t pid) +{ + agp_file_private *curr; + + curr = agp_fe.file_priv_list; + + while (curr != NULL) { + if (curr->my_pid == pid) { + return curr; + } + curr = curr->next; + } + + return NULL; +} + +void agp_insert_file_private(agp_file_private * priv) +{ + agp_file_private *prev; + + prev = agp_fe.file_priv_list; + + if (prev != NULL) { + prev->prev = priv; + } + priv->next = prev; + agp_fe.file_priv_list = priv; +} + +void agp_remove_file_private(agp_file_private * priv) +{ + agp_file_private *next; + agp_file_private *prev; + + next = priv->next; + prev = priv->prev; + + if (prev != NULL) { + prev->next = next; + + if (next != NULL) { + next->prev = prev; + } + } else { + if (next != NULL) { + next->prev = NULL; + } + agp_fe.file_priv_list = next; + } +} + +/* End - File flag list routines */ + +/* + * Wrappers for agp_free_memory & agp_allocate_memory + * These make sure that internal lists are kept updated. + */ +static void agp_free_memory_wrap(agp_memory * memory) +{ + agp_remove_from_pool(memory); + agp_free_memory(memory); +} + +static agp_memory *agp_allocate_memory_wrap(size_t pg_count, u32 type) +{ + agp_memory *memory; + + memory = agp_allocate_memory(pg_count, type); + + if (memory == NULL) { + return NULL; + } + agp_insert_into_pool(memory); + return memory; +} + +/* Routines for managing the list of controllers - + * These routines manage the current controller, and the list of + * controllers + */ + +static agp_controller *agp_find_controller_by_pid(pid_t id) +{ + agp_controller *controller; + + controller = agp_fe.controllers; + + while (controller != NULL) { + if (controller->pid == id) { + return controller; + } + controller = controller->next; + } + + return NULL; +} + +static agp_controller *agp_create_controller(pid_t id) +{ + agp_controller *controller; + + controller = kmalloc(sizeof(agp_controller), GFP_KERNEL); + + if (controller == NULL) { + return NULL; + } + memset(controller, 0, sizeof(agp_controller)); + 
controller->pid = id; + + return controller; +} + +static int agp_insert_controller(agp_controller * controller) +{ + agp_controller *prev_controller; + + prev_controller = agp_fe.controllers; + controller->next = prev_controller; + + if (prev_controller != NULL) { + prev_controller->prev = controller; + } + agp_fe.controllers = controller; + + return 0; +} + +static void agp_remove_all_clients(agp_controller * controller) +{ + agp_client *client; + agp_client *temp; + + client = controller->clients; + + while (client) { + agp_file_private *priv; + + temp = client; + agp_remove_seg_from_client(temp); + priv = agp_find_private(temp->pid); + + if (priv != NULL) { + clear_bit(AGP_FF_IS_VALID, &(priv->access_flags)); + clear_bit(AGP_FF_IS_CLIENT, &(priv->access_flags)); + } + client = client->next; + kfree(temp); + } +} + +static void agp_remove_all_memory(agp_controller * controller) +{ + agp_memory *memory; + agp_memory *temp; + + memory = controller->pool; + + while (memory) { + temp = memory; + memory = memory->next; + agp_free_memory_wrap(temp); + } +} + +static int agp_remove_controller(agp_controller * controller) +{ + agp_controller *prev_controller; + agp_controller *next_controller; + + prev_controller = controller->prev; + next_controller = controller->next; + + if (prev_controller != NULL) { + prev_controller->next = next_controller; + if (next_controller != NULL) { + next_controller->prev = prev_controller; + } + } else { + if (next_controller != NULL) { + next_controller->prev = NULL; + } + agp_fe.controllers = next_controller; + } + + agp_remove_all_memory(controller); + agp_remove_all_clients(controller); + + if (agp_fe.current_controller == controller) { + agp_fe.current_controller = NULL; + agp_fe.backend_acquired = FALSE; + agp_backend_release(); + } + kfree(controller); + return 0; +} + +static void agp_controller_make_current(agp_controller * controller) +{ + agp_client *clients; + + clients = controller->clients; + + while (clients != NULL) { + 
agp_file_private *priv; + + priv = agp_find_private(clients->pid); + + if (priv != NULL) { + set_bit(AGP_FF_IS_VALID, &(priv->access_flags)); + set_bit(AGP_FF_IS_CLIENT, &(priv->access_flags)); + } + clients = clients->next; + } + + agp_fe.current_controller = controller; +} + +static void agp_controller_release_current(agp_controller * controller, + agp_file_private * controller_priv) +{ + agp_client *clients; + + clear_bit(AGP_FF_IS_VALID, &(controller_priv->access_flags)); + clients = controller->clients; + + while (clients != NULL) { + agp_file_private *priv; + + priv = agp_find_private(clients->pid); + + if (priv != NULL) { + clear_bit(AGP_FF_IS_VALID, &(priv->access_flags)); + } + clients = clients->next; + } + + agp_fe.current_controller = NULL; + agp_fe.used_by_controller = FALSE; + agp_backend_release(); +} + +/* + * Routines for managing client lists - + * These routines are for managing the list of auth'ed clients. + */ + +static agp_client *agp_find_client_in_controller(agp_controller * controller, + pid_t id) +{ + agp_client *client; + + if (controller == NULL) { + return NULL; + } + client = controller->clients; + + while (client != NULL) { + if (client->pid == id) { + return client; + } + client = client->next; + } + + return NULL; +} + +static agp_controller *agp_find_controller_for_client(pid_t id) +{ + agp_controller *controller; + + controller = agp_fe.controllers; + + while (controller != NULL) { + if ((agp_find_client_in_controller(controller, id)) != NULL) { + return controller; + } + controller = controller->next; + } + + return NULL; +} + +static agp_client *agp_find_client_by_pid(pid_t id) +{ + agp_client *temp; + + if (agp_fe.current_controller == NULL) { + return NULL; + } + temp = agp_find_client_in_controller(agp_fe.current_controller, id); + return temp; +} + +static void agp_insert_client(agp_client * client) +{ + agp_client *prev_client; + + prev_client = agp_fe.current_controller->clients; + client->next = prev_client; + + if 
(prev_client != NULL) { + prev_client->prev = client; + } + agp_fe.current_controller->clients = client; + agp_fe.current_controller->num_clients++; +} + +static agp_client *agp_create_client(pid_t id) +{ + agp_client *new_client; + + new_client = kmalloc(sizeof(agp_client), GFP_KERNEL); + + if (new_client == NULL) { + return NULL; + } + memset(new_client, 0, sizeof(agp_client)); + new_client->pid = id; + agp_insert_client(new_client); + return new_client; +} + +static int agp_remove_client(pid_t id) +{ + agp_client *client; + agp_client *prev_client; + agp_client *next_client; + agp_controller *controller; + + controller = agp_find_controller_for_client(id); + + if (controller == NULL) { + return -EINVAL; + } + client = agp_find_client_in_controller(controller, id); + + if (client == NULL) { + return -EINVAL; + } + prev_client = client->prev; + next_client = client->next; + + if (prev_client != NULL) { + prev_client->next = next_client; + if (next_client != NULL) { + next_client->prev = prev_client; + } + } else { + if (next_client != NULL) { + next_client->prev = NULL; + } + controller->clients = next_client; + } + + controller->num_clients--; + agp_remove_seg_from_client(client); + kfree(client); + return 0; +} + +/* End - Routines for managing client lists */ + +/* File Operations */ + +static int agp_mmap(struct file *file, struct vm_area_struct *vma) +{ + int size; + int current_size; + unsigned long offset; + agp_client *client; + agp_file_private *priv = (agp_file_private *) file->private_data; + agp_kern_info kerninfo; + + AGP_LOCK(); + + if (agp_fe.backend_acquired != TRUE) { + AGP_UNLOCK(); + return -EPERM; + } + if (!(test_bit(AGP_FF_IS_VALID, &(priv->access_flags)))) { + AGP_UNLOCK(); + return -EPERM; + } + agp_copy_info(&kerninfo); + size = vma->vm_end - vma->vm_start; + current_size = kerninfo.aper_size; + current_size = current_size * 0x100000; + offset = vma->vm_offset; + + if (test_bit(AGP_FF_IS_CLIENT, &(priv->access_flags))) { + if ((size + 
offset) > current_size) { + AGP_UNLOCK(); + return -EINVAL; + } + client = agp_find_client_by_pid(current->pid); + + if (client == NULL) { + AGP_UNLOCK(); + return -EPERM; + } + if (!agp_find_seg_in_client(client, offset, size, vma->vm_page_prot)) { + AGP_UNLOCK(); + return -EINVAL; + } + if (remap_page_range(vma->vm_start, (kerninfo.aper_base + offset), + size, vma->vm_page_prot)) { + AGP_UNLOCK(); + return -EAGAIN; + } + AGP_UNLOCK(); + return 0; + } + if (test_bit(AGP_FF_IS_CONTROLLER, &(priv->access_flags))) { + if (size != current_size) { + AGP_UNLOCK(); + return -EINVAL; + } + if (remap_page_range(vma->vm_start, kerninfo.aper_base, + size, vma->vm_page_prot)) { + AGP_UNLOCK(); + return -EAGAIN; + } + AGP_UNLOCK(); + return 0; + } + AGP_UNLOCK(); + return -EPERM; +} + +static int agp_release(struct inode *inode, struct file *file) +{ + agp_file_private *priv = (agp_file_private *) file->private_data; + + AGP_LOCK(); + + if (test_bit(AGP_FF_IS_CONTROLLER, &(priv->access_flags))) { + agp_controller *controller; + + controller = agp_find_controller_by_pid(priv->my_pid); + + if (controller != NULL) { + if (controller == agp_fe.current_controller) { + agp_controller_release_current(controller, priv); + } + agp_remove_controller(controller); + } + } + if (test_bit(AGP_FF_IS_CLIENT, &(priv->access_flags))) { + agp_remove_client(priv->my_pid); + } + agp_remove_file_private(priv); + kfree(priv); + MOD_DEC_USE_COUNT; + AGP_UNLOCK(); + return 0; +} + +static int agp_open(struct inode *inode, struct file *file) +{ + int minor = MINOR(inode->i_rdev); + agp_file_private *priv; + agp_client *client; + + AGP_LOCK(); + + if (minor != AGPGART_MINOR) { + AGP_UNLOCK(); + return -ENXIO; + } + priv = kmalloc(sizeof(agp_file_private), GFP_KERNEL); + + if (priv == NULL) { + AGP_UNLOCK(); + return -ENOMEM; + } + memset(priv, 0, sizeof(agp_file_private)); + set_bit(AGP_FF_ALLOW_CLIENT, &(priv->access_flags)); + priv->my_pid = current->pid; + + if ((current->uid == 0) || (current->suid 
== 0)) { + /* Root priv, can be controller */ + set_bit(AGP_FF_ALLOW_CONTROLLER, &(priv->access_flags)); + } + client = agp_find_client_by_pid(current->pid); + + if (client != NULL) { + set_bit(AGP_FF_IS_CLIENT, &(priv->access_flags)); + set_bit(AGP_FF_IS_VALID, &(priv->access_flags)); + } + file->private_data = (void *) priv; + agp_insert_file_private(priv); + MOD_INC_USE_COUNT; + AGP_UNLOCK(); + return 0; +} + + +static long long agp_lseek(struct file *file, long long offset, int origin) +{ + return -ESPIPE; +} + +static ssize_t agp_read(struct file *file, char *buf, + size_t count, loff_t * ppos) +{ + return -EINVAL; +} + +static ssize_t agp_write(struct file *file, const char *buf, + size_t count, loff_t * ppos) +{ + return -EINVAL; +} + +static int agpioc_info_wrap(agp_file_private * priv, unsigned long arg) +{ + agp_info userinfo; + agp_kern_info kerninfo; + + agp_copy_info(&kerninfo); + + userinfo.version.major = kerninfo.version.major; + userinfo.version.minor = kerninfo.version.minor; + userinfo.bridge_id = kerninfo.device->vendor | (kerninfo.device->device << 16); + userinfo.agp_mode = kerninfo.mode; + userinfo.aper_base = kerninfo.aper_base; + userinfo.aper_size = kerninfo.aper_size; + userinfo.pg_total = userinfo.pg_system = kerninfo.max_memory; + userinfo.pg_used = kerninfo.current_memory; + + if (copy_to_user((void *) arg, &userinfo, sizeof(agp_info))) { + return -EFAULT; + } + return 0; +} + +static int agpioc_acquire_wrap(agp_file_private * priv, unsigned long arg) +{ + agp_controller *controller; + if (!(test_bit(AGP_FF_ALLOW_CONTROLLER, &(priv->access_flags)))) { + return -EPERM; + } + if (agp_fe.current_controller != NULL) { + return -EBUSY; + } + if ((agp_backend_acquire()) == 0) { + agp_fe.backend_acquired = TRUE; + } else { + return -EBUSY; + } + + controller = agp_find_controller_by_pid(priv->my_pid); + + if (controller != NULL) { + agp_controller_make_current(controller); + } else { + controller = agp_create_controller(priv->my_pid); + + if 
(controller == NULL) { + agp_fe.backend_acquired = FALSE; + agp_backend_release(); + return -ENOMEM; + } + agp_insert_controller(controller); + agp_controller_make_current(controller); + } + + set_bit(AGP_FF_IS_CONTROLLER, &(priv->access_flags)); + set_bit(AGP_FF_IS_VALID, &(priv->access_flags)); + return 0; +} + +static int agpioc_release_wrap(agp_file_private * priv, unsigned long arg) +{ + agp_controller_release_current(agp_fe.current_controller, priv); + return 0; +} + +static int agpioc_setup_wrap(agp_file_private * priv, unsigned long arg) +{ + agp_setup mode; + + if (copy_from_user(&mode, (void *) arg, sizeof(agp_setup))) { + return -EFAULT; + } + agp_enable(mode.agp_mode); + return 0; +} + +static int agpioc_reserve_wrap(agp_file_private * priv, unsigned long arg) +{ + agp_region reserve; + agp_client *client; + agp_file_private *client_priv; + + + if (copy_from_user(&reserve, (void *) arg, sizeof(agp_region))) { + return -EFAULT; + } + client = agp_find_client_by_pid(reserve.pid); + + if (reserve.seg_count == 0) { + /* remove a client */ + client_priv = agp_find_private(reserve.pid); + + if (client_priv != NULL) { + set_bit(AGP_FF_IS_CLIENT, &(client_priv->access_flags)); + set_bit(AGP_FF_IS_VALID, &(client_priv->access_flags)); + } + if (client == NULL) { + /* client is already removed */ + return 0; + } + return agp_remove_client(reserve.pid); + } else { + agp_segment *segment; + + segment = kmalloc((sizeof(agp_segment) * reserve.seg_count), GFP_KERNEL); + + if (segment == NULL) { + return -ENOMEM; + } + if (copy_from_user(segment, (void *) reserve.seg_list, sizeof(agp_segment) * reserve.seg_count)) { + kfree(segment); + return -EFAULT; + } + reserve.seg_list = segment; + + if (client == NULL) { + /* Create the client and add the segment */ + client = agp_create_client(reserve.pid); + + if (client == NULL) { + kfree(segment); + return -ENOMEM; + } + client_priv = agp_find_private(reserve.pid); + + if (client_priv != NULL) { + set_bit(AGP_FF_IS_CLIENT, &(client_priv->access_flags));
+ set_bit(AGP_FF_IS_VALID, &(client_priv->access_flags)); + } + return agp_create_segment(client, &reserve); + } else { + return agp_create_segment(client, &reserve); + } + } + /* Will never really happen */ + return -EINVAL; +} + +static int agpioc_protect_wrap(agp_file_private * priv, unsigned long arg) +{ + /* This function is not currently implemented */ + return -EINVAL; +} + +static int agpioc_allocate_wrap(agp_file_private * priv, unsigned long arg) +{ + agp_memory *memory; + agp_allocate alloc; + + if (copy_from_user(&alloc, (void *) arg, sizeof(agp_allocate))) { + return -EFAULT; + } + memory = agp_allocate_memory_wrap(alloc.pg_count, alloc.type); + + if (memory == NULL) { + return -ENOMEM; + } + alloc.key = memory->key; + + if (copy_to_user((void *) arg, &alloc, sizeof(agp_allocate))) { + agp_free_memory_wrap(memory); + return -EFAULT; + } + return 0; +} + +static int agpioc_deallocate_wrap(agp_file_private * priv, unsigned long arg) +{ + agp_memory *memory; + + memory = agp_find_mem_by_key((int) arg); + + if (memory == NULL) { + return -EINVAL; + } + agp_free_memory_wrap(memory); + return 0; +} + +static int agpioc_bind_wrap(agp_file_private * priv, unsigned long arg) +{ + agp_bind bind_info; + agp_memory *memory; + + if (copy_from_user(&bind_info, (void *) arg, sizeof(agp_bind))) { + return -EFAULT; + } + memory = agp_find_mem_by_key(bind_info.key); + + if (memory == NULL) { + return -EINVAL; + } + return agp_bind_memory(memory, bind_info.pg_start); +} + +static int agpioc_unbind_wrap(agp_file_private * priv, unsigned long arg) +{ + agp_memory *memory; + agp_unbind unbind; + + if (copy_from_user(&unbind, (void *) arg, sizeof(agp_unbind))) { + return -EFAULT; + } + memory = agp_find_mem_by_key(unbind.key); + + if (memory == NULL) { + return -EINVAL; + } + return agp_unbind_memory(memory); +} + +static int agp_ioctl(struct inode *inode, struct file *file, + unsigned int cmd, unsigned long arg) +{ + agp_file_private *curr_priv = (agp_file_private *) 
file->private_data; + int ret_val; + + AGP_LOCK(); + + if ((agp_fe.current_controller == NULL) && + (cmd != AGPIOC_ACQUIRE)) { + AGP_UNLOCK(); return -EINVAL; + } + if ((agp_fe.backend_acquired != TRUE) && + (cmd != AGPIOC_ACQUIRE)) { + AGP_UNLOCK(); return -EBUSY; + } + if (cmd != AGPIOC_ACQUIRE) { + if (!(test_bit(AGP_FF_IS_CONTROLLER, &(curr_priv->access_flags)))) { + AGP_UNLOCK(); return -EPERM; + } + /* Use the original pid of the controller, in case it's threaded */ + + if (agp_fe.current_controller->pid != curr_priv->my_pid) { + AGP_UNLOCK(); return -EBUSY; + } + } + switch (cmd) { + case AGPIOC_INFO: + { + ret_val = agpioc_info_wrap(curr_priv, arg); + AGP_UNLOCK(); + return ret_val; + } + case AGPIOC_ACQUIRE: + { + ret_val = agpioc_acquire_wrap(curr_priv, arg); + AGP_UNLOCK(); + return ret_val; + } + case AGPIOC_RELEASE: + { + ret_val = agpioc_release_wrap(curr_priv, arg); + AGP_UNLOCK(); + return ret_val; + } + case AGPIOC_SETUP: + { + ret_val = agpioc_setup_wrap(curr_priv, arg); + AGP_UNLOCK(); + return ret_val; + } + case AGPIOC_RESERVE: + { + ret_val = agpioc_reserve_wrap(curr_priv, arg); + AGP_UNLOCK(); + return ret_val; + } + case AGPIOC_PROTECT: + { + ret_val = agpioc_protect_wrap(curr_priv, arg); + AGP_UNLOCK(); + return ret_val; + } + case AGPIOC_ALLOCATE: + { + ret_val = agpioc_allocate_wrap(curr_priv, arg); + AGP_UNLOCK(); + return ret_val; + } + case AGPIOC_DEALLOCATE: + { + ret_val = agpioc_deallocate_wrap(curr_priv, arg); + AGP_UNLOCK(); + return ret_val; + } + case AGPIOC_BIND: + { + ret_val = agpioc_bind_wrap(curr_priv, arg); + AGP_UNLOCK(); + return ret_val; + } + case AGPIOC_UNBIND: + { + ret_val = agpioc_unbind_wrap(curr_priv, arg); + AGP_UNLOCK(); + return ret_val; + } + } + + AGP_UNLOCK(); + return -ENOTTY; +} + +static struct file_operations agp_fops = +{ + agp_lseek, + agp_read, + agp_write, + NULL, + NULL, + agp_ioctl, + agp_mmap, + agp_open, + NULL, + agp_release +}; + +static struct miscdevice agp_miscdev = +{ + AGPGART_MINOR, + "agpgart", + &agp_fops +}; + +int
agp_frontend_initialize(void) +{ + memset(&agp_fe, 0, sizeof(struct agp_front_data)); + AGP_LOCK_INIT(); + + if (misc_register(&agp_miscdev)) { + printk("agpgart: unable to get minor: %d\n", AGPGART_MINOR); + return -EIO; + } + return 0; +} + +void agp_frontend_cleanup(void) +{ + return; +} --- linux/drivers/char/Config.in.newagpdist Tue Jan 4 13:12:14 2000 +++ linux/drivers/char/Config.in Fri Feb 11 14:50:45 2000 @@ -62,7 +62,7 @@ bool ' Support IEEE1284 status readback' CONFIG_PRINTER_READBACK fi fi - + bool 'Mouse Support (not serial mice)' CONFIG_MOUSE if [ "$CONFIG_MOUSE" = "y" ]; then mainmenu_option next_comment @@ -116,6 +116,18 @@ bool 'Tadpole ANA H8 Support' CONFIG_H8 fi +if [ "$CONFIG_EXPERIMENTAL" = "y" ]; then + dep_tristate '/dev/agpgart (AGP Support) (EXPERIMENTAL)' CONFIG_AGP m + if [ "$CONFIG_AGP" = "m" ]; then + bool ' Intel 440LX/BX/GX support' CONFIG_AGP_INTEL + bool ' Intel I810/I810 DC100/I810e support' CONFIG_AGP_I810 + bool ' VIA VP3/MVP3/Apollo Pro support' CONFIG_AGP_VIA + bool ' AMD Irongate support' CONFIG_AGP_AMD + bool ' Generic SiS support' CONFIG_AGP_SIS + bool ' ALI M1541 support' CONFIG_AGP_ALI + fi +fi + mainmenu_option next_comment comment 'Video For Linux' --- linux/drivers/char/Makefile.newagpdist Fri Feb 11 14:49:48 2000 +++ linux/drivers/char/Makefile Fri Feb 11 14:50:45 2000 @@ -352,6 +352,11 @@ endif endif +ifeq ($(CONFIG_AGP), m) + ALL_SUB_DIRS += agp + MOD_SUB_DIRS += agp +endif + ifeq ($(CONFIG_VIDEO_DEV),y) LX_OBJS += videodev.o else --- linux/arch/i386/mm/ioremap.c.newagpdist Mon Aug 9 15:04:38 1999 +++ linux/arch/i386/mm/ioremap.c Fri Feb 11 14:50:45 2000 @@ -110,7 +110,18 @@ * Don't allow anybody to remap normal RAM that we're using.. 
*/ if (phys_addr < virt_to_phys(high_memory)) - return NULL; + { + char *temp_addr, *temp_end; + int i; + + temp_addr = __va(phys_addr); + temp_end = temp_addr + (size - 1); + + for(i = MAP_NR(temp_addr); i <= MAP_NR(temp_end); i++) { + if(!PageReserved(mem_map + i)) + return NULL; + } + } /* * Mappings have to be page-aligned --- linux/Documentation/Configure.help.newagpdist Fri Feb 11 14:49:49 2000 +++ linux/Documentation/Configure.help Fri Feb 11 14:50:45 2000 @@ -9430,6 +9430,20 @@ sampling), then say Y here, and read Documentation/rtc.txt for details. +AGP/GART support +CONFIG_AGP + This provides a kernel interface (/dev/agpgart) for programming AGP + transfers on motherboards that support them. Primarily, this is used + for hardware-accelerated 3d graphics, though any other AGP device + could take advantage of it. + + If you have a 3d-capable AGP video card say 'M' or 'Y' here. + Otherwise, say 'N'. + + You will also have to indicate support for your specific chipset. + Consult the output of lspci, your motherboard manual, or the inside + of your computer if unsure what to choose. Multiple selections are ok. + Tadpole ANA H8 Support CONFIG_H8 The Hitachi H8/337 is a microcontroller used to deal with the power