More Makefile cleanups, otherwise mainly noticeable are the netfilter fix
[davej-history.git] / drivers / char / agp / agpgart_be.c
blob942142832282f8daf0d2ec97d0310e06b64957e3
1 /*
2 * AGPGART module version 0.99
3 * Copyright (C) 1999 Jeff Hartmann
4 * Copyright (C) 1999 Precision Insight, Inc.
5 * Copyright (C) 1999 Xi Graphics, Inc.
7 * Permission is hereby granted, free of charge, to any person obtaining a
8 * copy of this software and associated documentation files (the "Software"),
9 * to deal in the Software without restriction, including without limitation
10 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
11 * and/or sell copies of the Software, and to permit persons to whom the
12 * Software is furnished to do so, subject to the following conditions:
14 * The above copyright notice and this permission notice shall be included
15 * in all copies or substantial portions of the Software.
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
18 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * JEFF HARTMANN, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM,
21 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
22 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
23 * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26 #include <linux/config.h>
27 #include <linux/version.h>
28 #include <linux/module.h>
29 #include <linux/types.h>
30 #include <linux/kernel.h>
31 #include <linux/sched.h>
32 #include <linux/mm.h>
33 #include <linux/string.h>
34 #include <linux/errno.h>
35 #include <linux/malloc.h>
36 #include <linux/vmalloc.h>
37 #include <linux/pci.h>
38 #include <linux/init.h>
39 #include <linux/pagemap.h>
40 #include <linux/miscdevice.h>
41 #include <asm/system.h>
42 #include <asm/uaccess.h>
43 #include <asm/io.h>
44 #include <asm/page.h>
46 #include <linux/agp_backend.h>
47 #include "agp.h"
49 MODULE_AUTHOR("Jeff Hartmann <jhartmann@precisioninsight.com>");
50 MODULE_PARM(agp_try_unsupported, "1i");
51 EXPORT_SYMBOL(agp_free_memory);
52 EXPORT_SYMBOL(agp_allocate_memory);
53 EXPORT_SYMBOL(agp_copy_info);
54 EXPORT_SYMBOL(agp_bind_memory);
55 EXPORT_SYMBOL(agp_unbind_memory);
56 EXPORT_SYMBOL(agp_enable);
57 EXPORT_SYMBOL(agp_backend_acquire);
58 EXPORT_SYMBOL(agp_backend_release);
60 static void flush_cache(void);
62 static struct agp_bridge_data agp_bridge;
63 static int agp_try_unsupported __initdata = 0;
66 static inline void flush_cache(void)
68 #if defined(__i386__)
69 asm volatile ("wbinvd":::"memory");
70 #elif defined(__alpha__) || defined(__ia64__)
71 /* ??? I wonder if we'll really need to flush caches, or if the
72 core logic can manage to keep the system coherent. The ARM
73 speaks only of using `cflush' to get things in memory in
74 preparation for power failure.
76 If we do need to call `cflush', we'll need a target page,
77 as we can only flush one page at a time.
79 Ditto for IA-64. --davidm 00/08/07 */
80 mb();
81 #else
82 #error "Please define flush_cache."
83 #endif
#ifdef CONFIG_SMP
static atomic_t cpus_waiting;

/* IPI target: flush this CPU's caches, then spin until every other CPU
 * (and the initiator) has also finished flushing. */
static void ipi_handler(void *null)
{
	flush_cache();
	atomic_dec(&cpus_waiting);
	while (atomic_read(&cpus_waiting) > 0)
		barrier();
}

/* Flush caches on every processor: fire an IPI at the others, flush
 * locally, then wait for all of them to check in. */
static void smp_flush_cache(void)
{
	atomic_set(&cpus_waiting, smp_num_cpus - 1);
	if (smp_call_function(ipi_handler, NULL, 1, 0) != 0)
		panic(PFX "timed out waiting for the other CPUs!\n");
	flush_cache();
	while (atomic_read(&cpus_waiting) > 0)
		barrier();
}
#define global_cache_flush smp_flush_cache
#else				/* CONFIG_SMP */
#define global_cache_flush flush_cache
#endif				/* CONFIG_SMP */
111 int agp_backend_acquire(void)
113 if (agp_bridge.type == NOT_SUPPORTED) {
114 return -EINVAL;
116 atomic_inc(&agp_bridge.agp_in_use);
118 if (atomic_read(&agp_bridge.agp_in_use) != 1) {
119 atomic_dec(&agp_bridge.agp_in_use);
120 return -EBUSY;
122 MOD_INC_USE_COUNT;
123 return 0;
126 void agp_backend_release(void)
128 if (agp_bridge.type == NOT_SUPPORTED) {
129 return;
131 atomic_dec(&agp_bridge.agp_in_use);
132 MOD_DEC_USE_COUNT;
136 * Basic Page Allocation Routines -
137 * These routines handle page allocation
138 * and by default they reserve the allocated
139 * memory. They also handle incrementing the
140 * current_memory_agp value, Which is checked
141 * against a maximum value.
144 static unsigned long agp_alloc_page(void)
146 void *pt;
148 pt = (void *) __get_free_page(GFP_KERNEL);
149 if (pt == NULL) {
150 return 0;
152 atomic_inc(&virt_to_page(pt)->count);
153 set_bit(PG_locked, &virt_to_page(pt)->flags);
154 atomic_inc(&agp_bridge.current_memory_agp);
155 return (unsigned long) pt;
158 static void agp_destroy_page(unsigned long page)
160 void *pt = (void *) page;
162 if (pt == NULL) {
163 return;
165 atomic_dec(&virt_to_page(pt)->count);
166 clear_bit(PG_locked, &virt_to_page(pt)->flags);
167 wake_up(&virt_to_page(pt)->wait);
168 free_page((unsigned long) pt);
169 atomic_dec(&agp_bridge.current_memory_agp);
172 /* End Basic Page Allocation Routines */
175 * Generic routines for handling agp_memory structures -
176 * They use the basic page allocation routines to do the
177 * brunt of the work.
181 static void agp_free_key(int key)
184 if (key < 0) {
185 return;
187 if (key < MAXKEY) {
188 clear_bit(key, agp_bridge.key_list);
192 static int agp_get_key(void)
194 int bit;
196 bit = find_first_zero_bit(agp_bridge.key_list, MAXKEY);
197 if (bit < MAXKEY) {
198 set_bit(bit, agp_bridge.key_list);
199 return bit;
201 return -1;
204 static agp_memory *agp_create_memory(int scratch_pages)
206 agp_memory *new;
208 new = kmalloc(sizeof(agp_memory), GFP_KERNEL);
210 if (new == NULL) {
211 return NULL;
213 memset(new, 0, sizeof(agp_memory));
214 new->key = agp_get_key();
216 if (new->key < 0) {
217 kfree(new);
218 return NULL;
220 new->memory = vmalloc(PAGE_SIZE * scratch_pages);
222 if (new->memory == NULL) {
223 agp_free_key(new->key);
224 kfree(new);
225 return NULL;
227 new->num_scratch_pages = scratch_pages;
228 return new;
231 void agp_free_memory(agp_memory * curr)
233 int i;
235 if ((agp_bridge.type == NOT_SUPPORTED) || (curr == NULL)) {
236 return;
238 if (curr->is_bound == TRUE) {
239 agp_unbind_memory(curr);
241 if (curr->type != 0) {
242 agp_bridge.free_by_type(curr);
243 return;
245 if (curr->page_count != 0) {
246 for (i = 0; i < curr->page_count; i++) {
247 curr->memory[i] &= ~(0x00000fff);
248 agp_destroy_page((unsigned long)
249 phys_to_virt(curr->memory[i]));
252 agp_free_key(curr->key);
253 vfree(curr->memory);
254 kfree(curr);
255 MOD_DEC_USE_COUNT;
258 #define ENTRIES_PER_PAGE (PAGE_SIZE / sizeof(unsigned long))
260 agp_memory *agp_allocate_memory(size_t page_count, u32 type)
262 int scratch_pages;
263 agp_memory *new;
264 int i;
266 if (agp_bridge.type == NOT_SUPPORTED) {
267 return NULL;
269 if ((atomic_read(&agp_bridge.current_memory_agp) + page_count) >
270 agp_bridge.max_memory_agp) {
271 return NULL;
274 if (type != 0) {
275 new = agp_bridge.alloc_by_type(page_count, type);
276 return new;
278 /* We always increase the module count, since free auto-decrements
279 * it
282 MOD_INC_USE_COUNT;
284 scratch_pages = (page_count + ENTRIES_PER_PAGE - 1) / ENTRIES_PER_PAGE;
286 new = agp_create_memory(scratch_pages);
288 if (new == NULL) {
289 MOD_DEC_USE_COUNT;
290 return NULL;
292 for (i = 0; i < page_count; i++) {
293 new->memory[i] = agp_alloc_page();
295 if (new->memory[i] == 0) {
296 /* Free this structure */
297 agp_free_memory(new);
298 return NULL;
300 new->memory[i] =
301 agp_bridge.mask_memory(
302 virt_to_phys((void *) new->memory[i]),
303 type);
304 new->page_count++;
307 return new;
310 /* End - Generic routines for handling agp_memory structures */
312 static int agp_return_size(void)
314 int current_size;
315 void *temp;
317 temp = agp_bridge.current_size;
319 switch (agp_bridge.size_type) {
320 case U8_APER_SIZE:
321 current_size = A_SIZE_8(temp)->size;
322 break;
323 case U16_APER_SIZE:
324 current_size = A_SIZE_16(temp)->size;
325 break;
326 case U32_APER_SIZE:
327 current_size = A_SIZE_32(temp)->size;
328 break;
329 case LVL2_APER_SIZE:
330 current_size = A_SIZE_LVL2(temp)->size;
331 break;
332 case FIXED_APER_SIZE:
333 current_size = A_SIZE_FIX(temp)->size;
334 break;
335 default:
336 current_size = 0;
337 break;
340 return current_size;
343 /* Routine to copy over information structure */
345 void agp_copy_info(agp_kern_info * info)
347 memset(info, 0, sizeof(agp_kern_info));
348 if (agp_bridge.type == NOT_SUPPORTED) {
349 info->chipset = agp_bridge.type;
350 return;
352 info->version.major = agp_bridge.version->major;
353 info->version.minor = agp_bridge.version->minor;
354 info->device = agp_bridge.dev;
355 info->chipset = agp_bridge.type;
356 info->mode = agp_bridge.mode;
357 info->aper_base = agp_bridge.gart_bus_addr;
358 info->aper_size = agp_return_size();
359 info->max_memory = agp_bridge.max_memory_agp;
360 info->current_memory = atomic_read(&agp_bridge.current_memory_agp);
363 /* End - Routine to copy over information structure */
366 * Routines for handling swapping of agp_memory into the GATT -
367 * These routines take agp_memory and insert them into the GATT.
368 * They call device specific routines to actually write to the GATT.
371 int agp_bind_memory(agp_memory * curr, off_t pg_start)
373 int ret_val;
375 if ((agp_bridge.type == NOT_SUPPORTED) ||
376 (curr == NULL) || (curr->is_bound == TRUE)) {
377 return -EINVAL;
379 if (curr->is_flushed == FALSE) {
380 CACHE_FLUSH();
381 curr->is_flushed = TRUE;
383 ret_val = agp_bridge.insert_memory(curr, pg_start, curr->type);
385 if (ret_val != 0) {
386 return ret_val;
388 curr->is_bound = TRUE;
389 curr->pg_start = pg_start;
390 return 0;
393 int agp_unbind_memory(agp_memory * curr)
395 int ret_val;
397 if ((agp_bridge.type == NOT_SUPPORTED) || (curr == NULL)) {
398 return -EINVAL;
400 if (curr->is_bound != TRUE) {
401 return -EINVAL;
403 ret_val = agp_bridge.remove_memory(curr, curr->pg_start, curr->type);
405 if (ret_val != 0) {
406 return ret_val;
408 curr->is_bound = FALSE;
409 curr->pg_start = 0;
410 return 0;
413 /* End - Routines for handling swapping of agp_memory into the GATT */
416 * Driver routines - start
417 * Currently this module supports the following chipsets:
418 * i810, 440lx, 440bx, 440gx, i840, i850, via vp3, via mvp3, via kx133,
419 * via kt133, amd irongate, ALi M1541, and generic support for the SiS
420 * chipsets.
423 /* Generic Agp routines - Start */
425 static void agp_generic_agp_enable(u32 mode)
427 struct pci_dev *device = NULL;
428 u32 command, scratch, cap_id;
429 u8 cap_ptr;
431 pci_read_config_dword(agp_bridge.dev,
432 agp_bridge.capndx + 4,
433 &command);
436 * PASS1: go throu all devices that claim to be
437 * AGP devices and collect their data.
440 while ((device = pci_find_class(PCI_CLASS_DISPLAY_VGA << 8,
441 device)) != NULL) {
442 pci_read_config_dword(device, 0x04, &scratch);
444 if (!(scratch & 0x00100000))
445 continue;
447 pci_read_config_byte(device, 0x34, &cap_ptr);
449 if (cap_ptr != 0x00) {
450 do {
451 pci_read_config_dword(device,
452 cap_ptr, &cap_id);
454 if ((cap_id & 0xff) != 0x02)
455 cap_ptr = (cap_id >> 8) & 0xff;
457 while (((cap_id & 0xff) != 0x02) && (cap_ptr != 0x00));
459 if (cap_ptr != 0x00) {
461 * Ok, here we have a AGP device. Disable impossible
462 * settings, and adjust the readqueue to the minimum.
465 pci_read_config_dword(device, cap_ptr + 4, &scratch);
467 /* adjust RQ depth */
468 command =
469 ((command & ~0xff000000) |
470 min((mode & 0xff000000),
471 min((command & 0xff000000),
472 (scratch & 0xff000000))));
474 /* disable SBA if it's not supported */
475 if (!((command & 0x00000200) &&
476 (scratch & 0x00000200) &&
477 (mode & 0x00000200)))
478 command &= ~0x00000200;
480 /* disable FW if it's not supported */
481 if (!((command & 0x00000010) &&
482 (scratch & 0x00000010) &&
483 (mode & 0x00000010)))
484 command &= ~0x00000010;
486 if (!((command & 4) &&
487 (scratch & 4) &&
488 (mode & 4)))
489 command &= ~0x00000004;
491 if (!((command & 2) &&
492 (scratch & 2) &&
493 (mode & 2)))
494 command &= ~0x00000002;
496 if (!((command & 1) &&
497 (scratch & 1) &&
498 (mode & 1)))
499 command &= ~0x00000001;
503 * PASS2: Figure out the 4X/2X/1X setting and enable the
504 * target (our motherboard chipset).
507 if (command & 4) {
508 command &= ~3; /* 4X */
510 if (command & 2) {
511 command &= ~5; /* 2X */
513 if (command & 1) {
514 command &= ~6; /* 1X */
516 command |= 0x00000100;
518 pci_write_config_dword(agp_bridge.dev,
519 agp_bridge.capndx + 8,
520 command);
523 * PASS3: Go throu all AGP devices and update the
524 * command registers.
527 while ((device = pci_find_class(PCI_CLASS_DISPLAY_VGA << 8,
528 device)) != NULL) {
529 pci_read_config_dword(device, 0x04, &scratch);
531 if (!(scratch & 0x00100000))
532 continue;
534 pci_read_config_byte(device, 0x34, &cap_ptr);
536 if (cap_ptr != 0x00) {
537 do {
538 pci_read_config_dword(device,
539 cap_ptr, &cap_id);
541 if ((cap_id & 0xff) != 0x02)
542 cap_ptr = (cap_id >> 8) & 0xff;
544 while (((cap_id & 0xff) != 0x02) && (cap_ptr != 0x00));
546 if (cap_ptr != 0x00)
547 pci_write_config_dword(device, cap_ptr + 8, command);
551 static int agp_generic_create_gatt_table(void)
553 char *table;
554 char *table_end;
555 int size;
556 int page_order;
557 int num_entries;
558 int i;
559 void *temp;
560 struct page *page;
562 /* The generic routines can't handle 2 level gatt's */
563 if (agp_bridge.size_type == LVL2_APER_SIZE) {
564 return -EINVAL;
567 table = NULL;
568 i = agp_bridge.aperture_size_idx;
569 temp = agp_bridge.current_size;
570 size = page_order = num_entries = 0;
572 if (agp_bridge.size_type != FIXED_APER_SIZE) {
573 do {
574 switch (agp_bridge.size_type) {
575 case U8_APER_SIZE:
576 size = A_SIZE_8(temp)->size;
577 page_order =
578 A_SIZE_8(temp)->page_order;
579 num_entries =
580 A_SIZE_8(temp)->num_entries;
581 break;
582 case U16_APER_SIZE:
583 size = A_SIZE_16(temp)->size;
584 page_order = A_SIZE_16(temp)->page_order;
585 num_entries = A_SIZE_16(temp)->num_entries;
586 break;
587 case U32_APER_SIZE:
588 size = A_SIZE_32(temp)->size;
589 page_order = A_SIZE_32(temp)->page_order;
590 num_entries = A_SIZE_32(temp)->num_entries;
591 break;
592 /* This case will never really happen. */
593 case FIXED_APER_SIZE:
594 case LVL2_APER_SIZE:
595 default:
596 size = page_order = num_entries = 0;
597 break;
600 table = (char *) __get_free_pages(GFP_KERNEL,
601 page_order);
603 if (table == NULL) {
604 i++;
605 switch (agp_bridge.size_type) {
606 case U8_APER_SIZE:
607 agp_bridge.current_size = A_IDX8();
608 break;
609 case U16_APER_SIZE:
610 agp_bridge.current_size = A_IDX16();
611 break;
612 case U32_APER_SIZE:
613 agp_bridge.current_size = A_IDX32();
614 break;
615 /* This case will never really
616 * happen.
618 case FIXED_APER_SIZE:
619 case LVL2_APER_SIZE:
620 default:
621 agp_bridge.current_size =
622 agp_bridge.current_size;
623 break;
625 } else {
626 agp_bridge.aperture_size_idx = i;
628 } while ((table == NULL) &&
629 (i < agp_bridge.num_aperture_sizes));
630 } else {
631 size = ((aper_size_info_fixed *) temp)->size;
632 page_order = ((aper_size_info_fixed *) temp)->page_order;
633 num_entries = ((aper_size_info_fixed *) temp)->num_entries;
634 table = (char *) __get_free_pages(GFP_KERNEL, page_order);
637 if (table == NULL) {
638 return -ENOMEM;
640 table_end = table + ((PAGE_SIZE * (1 << page_order)) - 1);
642 for (page = virt_to_page(table); page <= virt_to_page(table_end); page++)
643 set_bit(PG_reserved, &page->flags);
645 agp_bridge.gatt_table_real = (unsigned long *) table;
646 CACHE_FLUSH();
647 agp_bridge.gatt_table = ioremap_nocache(virt_to_phys(table),
648 (PAGE_SIZE * (1 << page_order)));
649 CACHE_FLUSH();
651 if (agp_bridge.gatt_table == NULL) {
652 for (page = virt_to_page(table); page <= virt_to_page(table_end); page++)
653 clear_bit(PG_reserved, &page->flags);
655 free_pages((unsigned long) table, page_order);
657 return -ENOMEM;
659 agp_bridge.gatt_bus_addr = virt_to_phys(agp_bridge.gatt_table_real);
661 for (i = 0; i < num_entries; i++) {
662 agp_bridge.gatt_table[i] =
663 (unsigned long) agp_bridge.scratch_page;
666 return 0;
669 static int agp_generic_free_gatt_table(void)
671 int page_order;
672 char *table, *table_end;
673 void *temp;
674 struct page *page;
676 temp = agp_bridge.current_size;
678 switch (agp_bridge.size_type) {
679 case U8_APER_SIZE:
680 page_order = A_SIZE_8(temp)->page_order;
681 break;
682 case U16_APER_SIZE:
683 page_order = A_SIZE_16(temp)->page_order;
684 break;
685 case U32_APER_SIZE:
686 page_order = A_SIZE_32(temp)->page_order;
687 break;
688 case FIXED_APER_SIZE:
689 page_order = A_SIZE_FIX(temp)->page_order;
690 break;
691 case LVL2_APER_SIZE:
692 /* The generic routines can't deal with 2 level gatt's */
693 return -EINVAL;
694 break;
695 default:
696 page_order = 0;
697 break;
700 /* Do not worry about freeing memory, because if this is
701 * called, then all agp memory is deallocated and removed
702 * from the table.
705 iounmap(agp_bridge.gatt_table);
706 table = (char *) agp_bridge.gatt_table_real;
707 table_end = table + ((PAGE_SIZE * (1 << page_order)) - 1);
709 for (page = virt_to_page(table); page <= virt_to_page(table_end); page++)
710 clear_bit(PG_reserved, &page->flags);
712 free_pages((unsigned long) agp_bridge.gatt_table_real, page_order);
713 return 0;
716 static int agp_generic_insert_memory(agp_memory * mem,
717 off_t pg_start, int type)
719 int i, j, num_entries;
720 void *temp;
722 temp = agp_bridge.current_size;
724 switch (agp_bridge.size_type) {
725 case U8_APER_SIZE:
726 num_entries = A_SIZE_8(temp)->num_entries;
727 break;
728 case U16_APER_SIZE:
729 num_entries = A_SIZE_16(temp)->num_entries;
730 break;
731 case U32_APER_SIZE:
732 num_entries = A_SIZE_32(temp)->num_entries;
733 break;
734 case FIXED_APER_SIZE:
735 num_entries = A_SIZE_FIX(temp)->num_entries;
736 break;
737 case LVL2_APER_SIZE:
738 /* The generic routines can't deal with 2 level gatt's */
739 return -EINVAL;
740 break;
741 default:
742 num_entries = 0;
743 break;
746 if (type != 0 || mem->type != 0) {
747 /* The generic routines know nothing of memory types */
748 return -EINVAL;
750 if ((pg_start + mem->page_count) > num_entries) {
751 return -EINVAL;
753 j = pg_start;
755 while (j < (pg_start + mem->page_count)) {
756 if (!PGE_EMPTY(agp_bridge.gatt_table[j])) {
757 return -EBUSY;
759 j++;
762 if (mem->is_flushed == FALSE) {
763 CACHE_FLUSH();
764 mem->is_flushed = TRUE;
766 for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
767 agp_bridge.gatt_table[j] = mem->memory[i];
770 agp_bridge.tlb_flush(mem);
771 return 0;
774 static int agp_generic_remove_memory(agp_memory * mem, off_t pg_start,
775 int type)
777 int i;
779 if (type != 0 || mem->type != 0) {
780 /* The generic routines know nothing of memory types */
781 return -EINVAL;
783 for (i = pg_start; i < (mem->page_count + pg_start); i++) {
784 agp_bridge.gatt_table[i] =
785 (unsigned long) agp_bridge.scratch_page;
788 agp_bridge.tlb_flush(mem);
789 return 0;
792 static agp_memory *agp_generic_alloc_by_type(size_t page_count, int type)
794 return NULL;
797 static void agp_generic_free_by_type(agp_memory * curr)
799 if (curr->memory != NULL) {
800 vfree(curr->memory);
802 agp_free_key(curr->key);
803 kfree(curr);
806 void agp_enable(u32 mode)
808 if (agp_bridge.type == NOT_SUPPORTED) return;
809 agp_bridge.agp_enable(mode);
812 /* End - Generic Agp routines */
814 #ifdef CONFIG_AGP_I810
815 static aper_size_info_fixed intel_i810_sizes[] =
817 {64, 16384, 4},
818 /* The 32M mode still requires a 64k gatt */
819 {32, 8192, 4}
822 #define AGP_DCACHE_MEMORY 1
823 #define AGP_PHYS_MEMORY 2
825 static gatt_mask intel_i810_masks[] =
827 {I810_PTE_VALID, 0},
828 {(I810_PTE_VALID | I810_PTE_LOCAL), AGP_DCACHE_MEMORY},
829 {I810_PTE_VALID, 0}
832 static struct _intel_i810_private {
833 struct pci_dev *i810_dev; /* device one */
834 volatile u8 *registers;
835 int num_dcache_entries;
836 } intel_i810_private;
838 static int intel_i810_fetch_size(void)
840 u32 smram_miscc;
841 aper_size_info_fixed *values;
843 pci_read_config_dword(agp_bridge.dev, I810_SMRAM_MISCC, &smram_miscc);
844 values = A_SIZE_FIX(agp_bridge.aperture_sizes);
846 if ((smram_miscc & I810_GMS) == I810_GMS_DISABLE) {
847 printk(KERN_WARNING PFX "i810 is disabled\n");
848 return 0;
850 if ((smram_miscc & I810_GFX_MEM_WIN_SIZE) == I810_GFX_MEM_WIN_32M) {
851 agp_bridge.previous_size =
852 agp_bridge.current_size = (void *) (values + 1);
853 agp_bridge.aperture_size_idx = 1;
854 return values[1].size;
855 } else {
856 agp_bridge.previous_size =
857 agp_bridge.current_size = (void *) (values);
858 agp_bridge.aperture_size_idx = 0;
859 return values[0].size;
862 return 0;
865 static int intel_i810_configure(void)
867 aper_size_info_fixed *current_size;
868 u32 temp;
869 int i;
871 current_size = A_SIZE_FIX(agp_bridge.current_size);
873 pci_read_config_dword(intel_i810_private.i810_dev, I810_MMADDR, &temp);
874 temp &= 0xfff80000;
876 intel_i810_private.registers =
877 (volatile u8 *) ioremap(temp, 128 * 4096);
879 if ((INREG32(intel_i810_private.registers, I810_DRAM_CTL)
880 & I810_DRAM_ROW_0) == I810_DRAM_ROW_0_SDRAM) {
881 /* This will need to be dynamically assigned */
882 printk(KERN_INFO PFX "detected 4MB dedicated video ram.\n");
883 intel_i810_private.num_dcache_entries = 1024;
885 pci_read_config_dword(intel_i810_private.i810_dev, I810_GMADDR, &temp);
886 agp_bridge.gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
887 OUTREG32(intel_i810_private.registers, I810_PGETBL_CTL,
888 agp_bridge.gatt_bus_addr | I810_PGETBL_ENABLED);
889 CACHE_FLUSH();
891 if (agp_bridge.needs_scratch_page == TRUE) {
892 for (i = 0; i < current_size->num_entries; i++) {
893 OUTREG32(intel_i810_private.registers,
894 I810_PTE_BASE + (i * 4),
895 agp_bridge.scratch_page);
898 return 0;
901 static void intel_i810_cleanup(void)
903 OUTREG32(intel_i810_private.registers, I810_PGETBL_CTL, 0);
904 iounmap((void *) intel_i810_private.registers);
907 static void intel_i810_tlbflush(agp_memory * mem)
909 return;
912 static void intel_i810_agp_enable(u32 mode)
914 return;
917 static int intel_i810_insert_entries(agp_memory * mem, off_t pg_start,
918 int type)
920 int i, j, num_entries;
921 void *temp;
923 temp = agp_bridge.current_size;
924 num_entries = A_SIZE_FIX(temp)->num_entries;
926 if ((pg_start + mem->page_count) > num_entries) {
927 return -EINVAL;
929 for (j = pg_start; j < (pg_start + mem->page_count); j++) {
930 if (!PGE_EMPTY(agp_bridge.gatt_table[j])) {
931 return -EBUSY;
935 if (type != 0 || mem->type != 0) {
936 if ((type == AGP_DCACHE_MEMORY) &&
937 (mem->type == AGP_DCACHE_MEMORY)) {
938 /* special insert */
939 CACHE_FLUSH();
940 for (i = pg_start;
941 i < (pg_start + mem->page_count); i++) {
942 OUTREG32(intel_i810_private.registers,
943 I810_PTE_BASE + (i * 4),
944 (i * 4096) | I810_PTE_LOCAL |
945 I810_PTE_VALID);
947 CACHE_FLUSH();
948 agp_bridge.tlb_flush(mem);
949 return 0;
951 if((type == AGP_PHYS_MEMORY) &&
952 (mem->type == AGP_PHYS_MEMORY)) {
953 goto insert;
955 return -EINVAL;
958 insert:
959 CACHE_FLUSH();
960 for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
961 OUTREG32(intel_i810_private.registers,
962 I810_PTE_BASE + (j * 4), mem->memory[i]);
964 CACHE_FLUSH();
966 agp_bridge.tlb_flush(mem);
967 return 0;
970 static int intel_i810_remove_entries(agp_memory * mem, off_t pg_start,
971 int type)
973 int i;
975 for (i = pg_start; i < (mem->page_count + pg_start); i++) {
976 OUTREG32(intel_i810_private.registers,
977 I810_PTE_BASE + (i * 4),
978 agp_bridge.scratch_page);
981 CACHE_FLUSH();
982 agp_bridge.tlb_flush(mem);
983 return 0;
986 static agp_memory *intel_i810_alloc_by_type(size_t pg_count, int type)
988 agp_memory *new;
990 if (type == AGP_DCACHE_MEMORY) {
991 if (pg_count != intel_i810_private.num_dcache_entries) {
992 return NULL;
994 new = agp_create_memory(1);
996 if (new == NULL) {
997 return NULL;
999 new->type = AGP_DCACHE_MEMORY;
1000 new->page_count = pg_count;
1001 new->num_scratch_pages = 0;
1002 vfree(new->memory);
1003 MOD_INC_USE_COUNT;
1004 return new;
1006 if(type == AGP_PHYS_MEMORY) {
1007 /* The I810 requires a physical address to program
1008 * it's mouse pointer into hardware. However the
1009 * Xserver still writes to it through the agp
1010 * aperture
1012 if (pg_count != 1) {
1013 return NULL;
1015 new = agp_create_memory(1);
1017 if (new == NULL) {
1018 return NULL;
1020 MOD_INC_USE_COUNT;
1021 new->memory[0] = agp_alloc_page();
1023 if (new->memory[0] == 0) {
1024 /* Free this structure */
1025 agp_free_memory(new);
1026 return NULL;
1028 new->memory[0] =
1029 agp_bridge.mask_memory(
1030 virt_to_phys((void *) new->memory[0]),
1031 type);
1032 new->page_count = 1;
1033 new->num_scratch_pages = 1;
1034 new->type = AGP_PHYS_MEMORY;
1035 new->physical = virt_to_phys((void *) new->memory[0]);
1036 return new;
1039 return NULL;
1042 static void intel_i810_free_by_type(agp_memory * curr)
1044 agp_free_key(curr->key);
1045 if(curr->type == AGP_PHYS_MEMORY) {
1046 agp_destroy_page((unsigned long)
1047 phys_to_virt(curr->memory[0]));
1048 vfree(curr->memory);
1050 kfree(curr);
1051 MOD_DEC_USE_COUNT;
1054 static unsigned long intel_i810_mask_memory(unsigned long addr, int type)
1056 /* Type checking must be done elsewhere */
1057 return addr | agp_bridge.masks[type].mask;
1060 static int __init intel_i810_setup(struct pci_dev *i810_dev)
1062 intel_i810_private.i810_dev = i810_dev;
1064 agp_bridge.masks = intel_i810_masks;
1065 agp_bridge.num_of_masks = 2;
1066 agp_bridge.aperture_sizes = (void *) intel_i810_sizes;
1067 agp_bridge.size_type = FIXED_APER_SIZE;
1068 agp_bridge.num_aperture_sizes = 2;
1069 agp_bridge.dev_private_data = (void *) &intel_i810_private;
1070 agp_bridge.needs_scratch_page = TRUE;
1071 agp_bridge.configure = intel_i810_configure;
1072 agp_bridge.fetch_size = intel_i810_fetch_size;
1073 agp_bridge.cleanup = intel_i810_cleanup;
1074 agp_bridge.tlb_flush = intel_i810_tlbflush;
1075 agp_bridge.mask_memory = intel_i810_mask_memory;
1076 agp_bridge.agp_enable = intel_i810_agp_enable;
1077 agp_bridge.cache_flush = global_cache_flush;
1078 agp_bridge.create_gatt_table = agp_generic_create_gatt_table;
1079 agp_bridge.free_gatt_table = agp_generic_free_gatt_table;
1080 agp_bridge.insert_memory = intel_i810_insert_entries;
1081 agp_bridge.remove_memory = intel_i810_remove_entries;
1082 agp_bridge.alloc_by_type = intel_i810_alloc_by_type;
1083 agp_bridge.free_by_type = intel_i810_free_by_type;
1085 return 0;
1088 #endif /* CONFIG_AGP_I810 */
1090 #ifdef CONFIG_AGP_INTEL
1092 static int intel_fetch_size(void)
1094 int i;
1095 u16 temp;
1096 aper_size_info_16 *values;
1098 pci_read_config_word(agp_bridge.dev, INTEL_APSIZE, &temp);
1099 values = A_SIZE_16(agp_bridge.aperture_sizes);
1101 for (i = 0; i < agp_bridge.num_aperture_sizes; i++) {
1102 if (temp == values[i].size_value) {
1103 agp_bridge.previous_size =
1104 agp_bridge.current_size = (void *) (values + i);
1105 agp_bridge.aperture_size_idx = i;
1106 return values[i].size;
1110 return 0;
1113 static void intel_tlbflush(agp_memory * mem)
1115 pci_write_config_dword(agp_bridge.dev, INTEL_AGPCTRL, 0x2200);
1116 pci_write_config_dword(agp_bridge.dev, INTEL_AGPCTRL, 0x2280);
1119 static void intel_cleanup(void)
1121 u16 temp;
1122 aper_size_info_16 *previous_size;
1124 previous_size = A_SIZE_16(agp_bridge.previous_size);
1125 pci_read_config_word(agp_bridge.dev, INTEL_NBXCFG, &temp);
1126 pci_write_config_word(agp_bridge.dev, INTEL_NBXCFG, temp & ~(1 << 9));
1127 pci_write_config_word(agp_bridge.dev, INTEL_APSIZE,
1128 previous_size->size_value);
1131 static int intel_configure(void)
1133 u32 temp;
1134 u16 temp2;
1135 aper_size_info_16 *current_size;
1137 current_size = A_SIZE_16(agp_bridge.current_size);
1139 /* aperture size */
1140 pci_write_config_word(agp_bridge.dev, INTEL_APSIZE,
1141 current_size->size_value);
1143 /* address to map to */
1144 pci_read_config_dword(agp_bridge.dev, INTEL_APBASE, &temp);
1145 agp_bridge.gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
1147 /* attbase - aperture base */
1148 pci_write_config_dword(agp_bridge.dev, INTEL_ATTBASE,
1149 agp_bridge.gatt_bus_addr);
1151 /* agpctrl */
1152 pci_write_config_dword(agp_bridge.dev, INTEL_AGPCTRL, 0x2280);
1154 /* paccfg/nbxcfg */
1155 pci_read_config_word(agp_bridge.dev, INTEL_NBXCFG, &temp2);
1156 pci_write_config_word(agp_bridge.dev, INTEL_NBXCFG,
1157 (temp2 & ~(1 << 10)) | (1 << 9));
1158 /* clear any possible error conditions */
1159 pci_write_config_byte(agp_bridge.dev, INTEL_ERRSTS + 1, 7);
1160 return 0;
1163 static int intel_840_configure(void)
1165 u32 temp;
1166 u16 temp2;
1167 aper_size_info_16 *current_size;
1169 current_size = A_SIZE_16(agp_bridge.current_size);
1171 /* aperture size */
1172 pci_write_config_byte(agp_bridge.dev, INTEL_APSIZE,
1173 (char)current_size->size_value);
1175 /* address to map to */
1176 pci_read_config_dword(agp_bridge.dev, INTEL_APBASE, &temp);
1177 agp_bridge.gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
1179 /* attbase - aperture base */
1180 pci_write_config_dword(agp_bridge.dev, INTEL_ATTBASE,
1181 agp_bridge.gatt_bus_addr);
1183 /* agpctrl */
1184 pci_write_config_dword(agp_bridge.dev, INTEL_AGPCTRL, 0x0000);
1186 /* mcgcfg */
1187 pci_read_config_word(agp_bridge.dev, INTEL_I840_MCHCFG, &temp2);
1188 pci_write_config_word(agp_bridge.dev, INTEL_I840_MCHCFG,
1189 temp2 | (1 << 9));
1190 /* clear any possible error conditions */
1191 pci_write_config_word(agp_bridge.dev, INTEL_I840_ERRSTS, 0xc000);
1192 return 0;
1195 static int intel_850_configure(void)
1197 u32 temp;
1198 u16 temp2;
1199 aper_size_info_16 *current_size;
1201 current_size = A_SIZE_16(agp_bridge.current_size);
1203 /* aperture size */
1204 pci_write_config_byte(agp_bridge.dev, INTEL_APSIZE,
1205 (char)current_size->size_value);
1207 /* address to map to */
1208 pci_read_config_dword(agp_bridge.dev, INTEL_APBASE, &temp);
1209 agp_bridge.gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
1211 /* attbase - aperture base */
1212 pci_write_config_dword(agp_bridge.dev, INTEL_ATTBASE,
1213 agp_bridge.gatt_bus_addr);
1215 /* agpctrl */
1216 pci_write_config_dword(agp_bridge.dev, INTEL_AGPCTRL, 0x0000);
1218 /* mcgcfg */
1219 pci_read_config_word(agp_bridge.dev, INTEL_I850_MCHCFG, &temp2);
1220 pci_write_config_word(agp_bridge.dev, INTEL_I850_MCHCFG,
1221 temp2 | (1 << 9));
1222 /* clear any possible AGP-related error conditions */
1223 pci_write_config_word(agp_bridge.dev, INTEL_I850_ERRSTS, 0x001c);
1224 return 0;
1227 static unsigned long intel_mask_memory(unsigned long addr, int type)
1229 /* Memory type is ignored */
1231 return addr | agp_bridge.masks[0].mask;
1235 /* Setup function */
1236 static gatt_mask intel_generic_masks[] =
1238 {0x00000017, 0}
1241 static aper_size_info_16 intel_generic_sizes[7] =
1243 {256, 65536, 6, 0},
1244 {128, 32768, 5, 32},
1245 {64, 16384, 4, 48},
1246 {32, 8192, 3, 56},
1247 {16, 4096, 2, 60},
1248 {8, 2048, 1, 62},
1249 {4, 1024, 0, 63}
1252 static int __init intel_generic_setup (struct pci_dev *pdev)
1254 agp_bridge.masks = intel_generic_masks;
1255 agp_bridge.num_of_masks = 1;
1256 agp_bridge.aperture_sizes = (void *) intel_generic_sizes;
1257 agp_bridge.size_type = U16_APER_SIZE;
1258 agp_bridge.num_aperture_sizes = 7;
1259 agp_bridge.dev_private_data = NULL;
1260 agp_bridge.needs_scratch_page = FALSE;
1261 agp_bridge.configure = intel_configure;
1262 agp_bridge.fetch_size = intel_fetch_size;
1263 agp_bridge.cleanup = intel_cleanup;
1264 agp_bridge.tlb_flush = intel_tlbflush;
1265 agp_bridge.mask_memory = intel_mask_memory;
1266 agp_bridge.agp_enable = agp_generic_agp_enable;
1267 agp_bridge.cache_flush = global_cache_flush;
1268 agp_bridge.create_gatt_table = agp_generic_create_gatt_table;
1269 agp_bridge.free_gatt_table = agp_generic_free_gatt_table;
1270 agp_bridge.insert_memory = agp_generic_insert_memory;
1271 agp_bridge.remove_memory = agp_generic_remove_memory;
1272 agp_bridge.alloc_by_type = agp_generic_alloc_by_type;
1273 agp_bridge.free_by_type = agp_generic_free_by_type;
1275 return 0;
1277 (void) pdev; /* unused */
1280 static int __init intel_840_setup (struct pci_dev *pdev)
1282 agp_bridge.masks = intel_generic_masks;
1283 agp_bridge.num_of_masks = 1;
1284 agp_bridge.aperture_sizes = (void *) intel_generic_sizes;
1285 agp_bridge.size_type = U16_APER_SIZE;
1286 agp_bridge.num_aperture_sizes = 7;
1287 agp_bridge.dev_private_data = NULL;
1288 agp_bridge.needs_scratch_page = FALSE;
1289 agp_bridge.configure = intel_840_configure;
1290 agp_bridge.fetch_size = intel_fetch_size;
1291 agp_bridge.cleanup = intel_cleanup;
1292 agp_bridge.tlb_flush = intel_tlbflush;
1293 agp_bridge.mask_memory = intel_mask_memory;
1294 agp_bridge.agp_enable = agp_generic_agp_enable;
1295 agp_bridge.cache_flush = global_cache_flush;
1296 agp_bridge.create_gatt_table = agp_generic_create_gatt_table;
1297 agp_bridge.free_gatt_table = agp_generic_free_gatt_table;
1298 agp_bridge.insert_memory = agp_generic_insert_memory;
1299 agp_bridge.remove_memory = agp_generic_remove_memory;
1300 agp_bridge.alloc_by_type = agp_generic_alloc_by_type;
1301 agp_bridge.free_by_type = agp_generic_free_by_type;
1303 return 0;
1305 (void) pdev; /* unused */
1308 static int __init intel_850_setup (struct pci_dev *pdev)
1310 agp_bridge.masks = intel_generic_masks;
1311 agp_bridge.num_of_masks = 1;
1312 agp_bridge.aperture_sizes = (void *) intel_generic_sizes;
1313 agp_bridge.size_type = U16_APER_SIZE;
1314 agp_bridge.num_aperture_sizes = 7;
1315 agp_bridge.dev_private_data = NULL;
1316 agp_bridge.needs_scratch_page = FALSE;
1317 agp_bridge.configure = intel_850_configure;
1318 agp_bridge.fetch_size = intel_fetch_size;
1319 agp_bridge.cleanup = intel_cleanup;
1320 agp_bridge.tlb_flush = intel_tlbflush;
1321 agp_bridge.mask_memory = intel_mask_memory;
1322 agp_bridge.agp_enable = agp_generic_agp_enable;
1323 agp_bridge.cache_flush = global_cache_flush;
1324 agp_bridge.create_gatt_table = agp_generic_create_gatt_table;
1325 agp_bridge.free_gatt_table = agp_generic_free_gatt_table;
1326 agp_bridge.insert_memory = agp_generic_insert_memory;
1327 agp_bridge.remove_memory = agp_generic_remove_memory;
1328 agp_bridge.alloc_by_type = agp_generic_alloc_by_type;
1329 agp_bridge.free_by_type = agp_generic_free_by_type;
1331 return 0;
1333 (void) pdev; /* unused */
1336 #endif /* CONFIG_AGP_INTEL */
1338 #ifdef CONFIG_AGP_VIA
1340 static int via_fetch_size(void)
1342 int i;
1343 u8 temp;
1344 aper_size_info_8 *values;
1346 values = A_SIZE_8(agp_bridge.aperture_sizes);
1347 pci_read_config_byte(agp_bridge.dev, VIA_APSIZE, &temp);
1348 for (i = 0; i < agp_bridge.num_aperture_sizes; i++) {
1349 if (temp == values[i].size_value) {
1350 agp_bridge.previous_size =
1351 agp_bridge.current_size = (void *) (values + i);
1352 agp_bridge.aperture_size_idx = i;
1353 return values[i].size;
1357 return 0;
/*
 * Program the VIA bridge: aperture size, aperture base, GART control,
 * then the GATT base (low 12 bits carry enable/control flags).
 */
static int via_configure(void)
{
	u32 temp;
	aper_size_info_8 *current_size;

	current_size = A_SIZE_8(agp_bridge.current_size);
	/* aperture size */
	pci_write_config_byte(agp_bridge.dev, VIA_APSIZE,
			      current_size->size_value);
	/* address to map to */
	pci_read_config_dword(agp_bridge.dev, VIA_APBASE, &temp);
	agp_bridge.gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);

	/* GART control register */
	pci_write_config_dword(agp_bridge.dev, VIA_GARTCTRL, 0x0000000f);

	/* attbase - aperture GATT base */
	pci_write_config_dword(agp_bridge.dev, VIA_ATTBASE,
			       (agp_bridge.gatt_bus_addr & 0xfffff000) | 3);
	return 0;
}
/* Undo via_configure(): clear the GATT base and restore the previous
 * aperture size. */
static void via_cleanup(void)
{
	aper_size_info_8 *previous_size;

	previous_size = A_SIZE_8(agp_bridge.previous_size);
	pci_write_config_dword(agp_bridge.dev, VIA_ATTBASE, 0);
	pci_write_config_byte(agp_bridge.dev, VIA_APSIZE,
			      previous_size->size_value);
}
/* Flush the VIA GART TLB by pulsing a bit in GARTCTRL (0x8f then back
 * to the normal 0x0f value).  'mem' is unused. */
static void via_tlbflush(agp_memory * mem)
{
	pci_write_config_dword(agp_bridge.dev, VIA_GARTCTRL, 0x0000008f);
	pci_write_config_dword(agp_bridge.dev, VIA_GARTCTRL, 0x0000000f);
}
1398 static unsigned long via_mask_memory(unsigned long addr, int type)
1400 /* Memory type is ignored */
1402 return addr | agp_bridge.masks[0].mask;
/* Aperture table; columns presumably {size MB, entries, page order,
 * APSIZE register value} per aper_size_info_8 — TODO confirm. */
static aper_size_info_8 via_generic_sizes[7] =
{
	{256, 65536, 6, 0},
	{128, 32768, 5, 128},
	{64, 16384, 4, 192},
	{32, 8192, 3, 224},
	{16, 4096, 2, 240},
	{8, 2048, 1, 248},
	{4, 1024, 0, 252}
};

/* VIA applies no extra bits to GATT entries (mask 0, type 0). */
static gatt_mask via_generic_masks[] =
{
	{0x00000000, 0}
};
1421 static int __init via_generic_setup (struct pci_dev *pdev)
1423 agp_bridge.masks = via_generic_masks;
1424 agp_bridge.num_of_masks = 1;
1425 agp_bridge.aperture_sizes = (void *) via_generic_sizes;
1426 agp_bridge.size_type = U8_APER_SIZE;
1427 agp_bridge.num_aperture_sizes = 7;
1428 agp_bridge.dev_private_data = NULL;
1429 agp_bridge.needs_scratch_page = FALSE;
1430 agp_bridge.configure = via_configure;
1431 agp_bridge.fetch_size = via_fetch_size;
1432 agp_bridge.cleanup = via_cleanup;
1433 agp_bridge.tlb_flush = via_tlbflush;
1434 agp_bridge.mask_memory = via_mask_memory;
1435 agp_bridge.agp_enable = agp_generic_agp_enable;
1436 agp_bridge.cache_flush = global_cache_flush;
1437 agp_bridge.create_gatt_table = agp_generic_create_gatt_table;
1438 agp_bridge.free_gatt_table = agp_generic_free_gatt_table;
1439 agp_bridge.insert_memory = agp_generic_insert_memory;
1440 agp_bridge.remove_memory = agp_generic_remove_memory;
1441 agp_bridge.alloc_by_type = agp_generic_alloc_by_type;
1442 agp_bridge.free_by_type = agp_generic_free_by_type;
1444 return 0;
1446 (void) pdev; /* unused */
1449 #endif /* CONFIG_AGP_VIA */
1451 #ifdef CONFIG_AGP_SIS
1453 static int sis_fetch_size(void)
1455 u8 temp_size;
1456 int i;
1457 aper_size_info_8 *values;
1459 pci_read_config_byte(agp_bridge.dev, SIS_APSIZE, &temp_size);
1460 values = A_SIZE_8(agp_bridge.aperture_sizes);
1461 for (i = 0; i < agp_bridge.num_aperture_sizes; i++) {
1462 if ((temp_size == values[i].size_value) ||
1463 ((temp_size & ~(0x03)) ==
1464 (values[i].size_value & ~(0x03)))) {
1465 agp_bridge.previous_size =
1466 agp_bridge.current_size = (void *) (values + i);
1468 agp_bridge.aperture_size_idx = i;
1469 return values[i].size;
1473 return 0;
/* Flush the SiS GART TLB by writing the flush bit.  'mem' is unused. */
static void sis_tlbflush(agp_memory * mem)
{
	pci_write_config_byte(agp_bridge.dev, SIS_TLBFLUSH, 0x02);
}
/*
 * Program the SiS bridge: TLB control, aperture base, GATT base and
 * aperture size, in that order.
 */
static int sis_configure(void)
{
	u32 temp;
	aper_size_info_8 *current_size;

	current_size = A_SIZE_8(agp_bridge.current_size);
	pci_write_config_byte(agp_bridge.dev, SIS_TLBCNTRL, 0x05);
	pci_read_config_dword(agp_bridge.dev, SIS_APBASE, &temp);
	agp_bridge.gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
	pci_write_config_dword(agp_bridge.dev, SIS_ATTBASE,
			       agp_bridge.gatt_bus_addr);
	pci_write_config_byte(agp_bridge.dev, SIS_APSIZE,
			      current_size->size_value);
	return 0;
}
/* Restore the previous aperture size, clearing the two low control
 * bits of the APSIZE value. */
static void sis_cleanup(void)
{
	aper_size_info_8 *previous_size;

	previous_size = A_SIZE_8(agp_bridge.previous_size);
	pci_write_config_byte(agp_bridge.dev, SIS_APSIZE,
			      (previous_size->size_value & ~(0x03)));
}
1507 static unsigned long sis_mask_memory(unsigned long addr, int type)
1509 /* Memory type is ignored */
1511 return addr | agp_bridge.masks[0].mask;
/* Aperture table; columns presumably {size MB, entries, page order,
 * APSIZE register value} per aper_size_info_8 — TODO confirm. */
static aper_size_info_8 sis_generic_sizes[7] =
{
	{256, 65536, 6, 99},
	{128, 32768, 5, 83},
	{64, 16384, 4, 67},
	{32, 8192, 3, 51},
	{16, 4096, 2, 35},
	{8, 2048, 1, 19},
	{4, 1024, 0, 3}
};

/* SiS applies no extra bits to GATT entries (mask 0, type 0). */
static gatt_mask sis_generic_masks[] =
{
	{0x00000000, 0}
};
1530 static int __init sis_generic_setup (struct pci_dev *pdev)
1532 agp_bridge.masks = sis_generic_masks;
1533 agp_bridge.num_of_masks = 1;
1534 agp_bridge.aperture_sizes = (void *) sis_generic_sizes;
1535 agp_bridge.size_type = U8_APER_SIZE;
1536 agp_bridge.num_aperture_sizes = 7;
1537 agp_bridge.dev_private_data = NULL;
1538 agp_bridge.needs_scratch_page = FALSE;
1539 agp_bridge.configure = sis_configure;
1540 agp_bridge.fetch_size = sis_fetch_size;
1541 agp_bridge.cleanup = sis_cleanup;
1542 agp_bridge.tlb_flush = sis_tlbflush;
1543 agp_bridge.mask_memory = sis_mask_memory;
1544 agp_bridge.agp_enable = agp_generic_agp_enable;
1545 agp_bridge.cache_flush = global_cache_flush;
1546 agp_bridge.create_gatt_table = agp_generic_create_gatt_table;
1547 agp_bridge.free_gatt_table = agp_generic_free_gatt_table;
1548 agp_bridge.insert_memory = agp_generic_insert_memory;
1549 agp_bridge.remove_memory = agp_generic_remove_memory;
1550 agp_bridge.alloc_by_type = agp_generic_alloc_by_type;
1551 agp_bridge.free_by_type = agp_generic_free_by_type;
1553 return 0;
1556 #endif /* CONFIG_AGP_SIS */
1558 #ifdef CONFIG_AGP_AMD
/* One page of GATT entries: 'real' is the kernel-virtual allocation,
 * 'remapped' the same page mapped uncached via ioremap_nocache(). */
typedef struct _amd_page_map {
	unsigned long *real;
	unsigned long *remapped;
} amd_page_map;

/* Per-driver state for the AMD Irongate: its MMIO register window and
 * the array of GATT pages (one per 4MB of aperture). */
static struct _amd_irongate_private {
	volatile u8 *registers;
	amd_page_map **gatt_pages;
	int num_tables;
} amd_irongate_private;
/*
 * Allocate one table page: get a free page, mark it PG_reserved, map
 * it uncached, and initialize every entry to the scratch page so
 * stray GART accesses hit harmless memory.
 * Returns 0 on success, -ENOMEM on failure (with no page leaked).
 */
static int amd_create_page_map(amd_page_map *page_map)
{
	int i;

	page_map->real = (unsigned long *) __get_free_page(GFP_KERNEL);
	if (page_map->real == NULL) {
		return -ENOMEM;
	}
	set_bit(PG_reserved, &virt_to_page(page_map->real)->flags);
	CACHE_FLUSH();
	page_map->remapped = ioremap_nocache(virt_to_phys(page_map->real),
					    PAGE_SIZE);
	if (page_map->remapped == NULL) {
		/* undo the allocation; leave real == NULL so callers
		 * can tell this map was never established */
		clear_bit(PG_reserved,
			  &virt_to_page(page_map->real)->flags);
		free_page((unsigned long) page_map->real);
		page_map->real = NULL;
		return -ENOMEM;
	}
	CACHE_FLUSH();

	for(i = 0; i < PAGE_SIZE / sizeof(unsigned long); i++) {
		page_map->remapped[i] = agp_bridge.scratch_page;
	}

	return 0;
}
/* Release a table page created by amd_create_page_map(): unmap the
 * uncached mapping, clear PG_reserved, and free the page. */
static void amd_free_page_map(amd_page_map *page_map)
{
	iounmap(page_map->remapped);
	clear_bit(PG_reserved,
		  &virt_to_page(page_map->real)->flags);
	free_page((unsigned long) page_map->real);
}
/* Free every GATT page and the page-pointer array itself.  Safe to
 * call on a partially-built set: NULL slots and maps whose 'real'
 * page was never allocated are skipped. */
static void amd_free_gatt_pages(void)
{
	int i;
	amd_page_map **tables;
	amd_page_map *entry;

	tables = amd_irongate_private.gatt_pages;
	for(i = 0; i < amd_irongate_private.num_tables; i++) {
		entry = tables[i];
		if (entry != NULL) {
			if (entry->real != NULL) {
				amd_free_page_map(entry);
			}
			kfree(entry);
		}
	}
	kfree(tables);
}
/*
 * Allocate 'nr_tables' GATT pages plus a NULL-terminated pointer
 * array to hold them.  On any failure the partially-built set is torn
 * down via amd_free_gatt_pages() and the errno is returned.
 */
static int amd_create_gatt_pages(int nr_tables)
{
	amd_page_map **tables;
	amd_page_map *entry;
	int retval = 0;
	int i;

	tables = kmalloc((nr_tables + 1) * sizeof(amd_page_map *),
			 GFP_KERNEL);
	if (tables == NULL) {
		return -ENOMEM;
	}
	/* zero the array so the cleanup path can recognize unused slots */
	memset(tables, 0, sizeof(amd_page_map *) * (nr_tables + 1));
	for (i = 0; i < nr_tables; i++) {
		entry = kmalloc(sizeof(amd_page_map), GFP_KERNEL);
		if (entry == NULL) {
			retval = -ENOMEM;
			break;
		}
		memset(entry, 0, sizeof(amd_page_map));
		tables[i] = entry;
		retval = amd_create_page_map(entry);
		if (retval != 0) break;
	}
	/* publish before the error path so amd_free_gatt_pages() can
	 * walk what was built */
	amd_irongate_private.num_tables = nr_tables;
	amd_irongate_private.gatt_pages = tables;

	if (retval != 0) amd_free_gatt_pages();

	return retval;
}
/* Since we don't need contiguous memory we just try
 * to get the gatt table once.
 *
 * Helpers for the two-level table: bits 31:22 of an aperture address
 * select the page-directory slot, bits 21:12 the entry within that
 * GATT page.
 */
#define GET_PAGE_DIR_OFF(addr) (addr >> 22)
#define GET_PAGE_DIR_IDX(addr) (GET_PAGE_DIR_OFF(addr) - \
	GET_PAGE_DIR_OFF(agp_bridge.gart_bus_addr))
#define GET_GATT_OFF(addr) ((addr & 0x003ff000) >> 12)
#define GET_GATT(addr) (amd_irongate_private.gatt_pages[\
	GET_PAGE_DIR_IDX(addr)]->remapped)
/*
 * Build the two-level Irongate GATT: a single page directory whose
 * entries each point at one GATT page covering 4MB of aperture.
 * Returns 0 on success or a negative errno.
 */
static int amd_create_gatt_table(void)
{
	aper_size_info_lvl2 *value;
	amd_page_map page_dir;
	unsigned long addr;
	int retval;
	u32 temp;
	int i;

	value = A_SIZE_LVL2(agp_bridge.current_size);
	retval = amd_create_page_map(&page_dir);
	if (retval != 0) {
		return retval;
	}
	/* one GATT page per 1024 entries (4MB of aperture) */
	retval = amd_create_gatt_pages(value->num_entries / 1024);
	if (retval != 0) {
		amd_free_page_map(&page_dir);
		return retval;
	}
	agp_bridge.gatt_table_real = page_dir.real;
	agp_bridge.gatt_table = page_dir.remapped;
	agp_bridge.gatt_bus_addr = virt_to_bus(page_dir.real);

	/* Get the address for the gart region.
	 * This is a bus address even on the alpha, b/c its
	 * used to program the agp master not the cpu
	 */
	pci_read_config_dword(agp_bridge.dev, AMD_APBASE, &temp);
	addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
	agp_bridge.gart_bus_addr = addr;

	/* Calculate the agp offset; mark each directory slot present
	 * (bit 0) */
	for(i = 0; i < value->num_entries / 1024; i++, addr += 0x00400000) {
		page_dir.remapped[GET_PAGE_DIR_OFF(addr)] =
		    virt_to_bus(amd_irongate_private.gatt_pages[i]->real);
		page_dir.remapped[GET_PAGE_DIR_OFF(addr)] |= 0x00000001;
	}

	return 0;
}
/* Tear down the two-level GATT: free all GATT pages, then the page
 * directory itself.  Always returns 0. */
static int amd_free_gatt_table(void)
{
	amd_page_map page_dir;

	page_dir.real = agp_bridge.gatt_table_real;
	page_dir.remapped = agp_bridge.gatt_table;

	amd_free_gatt_pages();
	amd_free_page_map(&page_dir);
	return 0;
}
1725 static int amd_irongate_fetch_size(void)
1727 int i;
1728 u32 temp;
1729 aper_size_info_lvl2 *values;
1731 pci_read_config_dword(agp_bridge.dev, AMD_APSIZE, &temp);
1732 temp = (temp & 0x0000000e);
1733 values = A_SIZE_LVL2(agp_bridge.aperture_sizes);
1734 for (i = 0; i < agp_bridge.num_aperture_sizes; i++) {
1735 if (temp == values[i].size_value) {
1736 agp_bridge.previous_size =
1737 agp_bridge.current_size = (void *) (values + i);
1739 agp_bridge.aperture_size_idx = i;
1740 return values[i].size;
1744 return 0;
/*
 * Program the AMD Irongate: map its MMIO registers, install the GATT
 * base, set sync/indexing modes, enable the GART, program the
 * aperture size (with the enable bit), and flush the TLB.
 */
static int amd_irongate_configure(void)
{
	aper_size_info_lvl2 *current_size;
	u32 temp;
	u16 enable_reg;

	current_size = A_SIZE_LVL2(agp_bridge.current_size);

	/* Get the memory mapped registers */
	pci_read_config_dword(agp_bridge.dev, AMD_MMBASE, &temp);
	temp = (temp & PCI_BASE_ADDRESS_MEM_MASK);
	amd_irongate_private.registers = (volatile u8 *) ioremap(temp, 4096);

	/* Write out the address of the gatt table */
	OUTREG32(amd_irongate_private.registers, AMD_ATTBASE,
		 agp_bridge.gatt_bus_addr);

	/* Write the Sync register */
	pci_write_config_byte(agp_bridge.dev, AMD_MODECNTL, 0x80);

	/* Set indexing mode */
	pci_write_config_byte(agp_bridge.dev, AMD_MODECNTL2, 0x00);

	/* Write the enable register */
	enable_reg = INREG16(amd_irongate_private.registers, AMD_GARTENABLE);
	enable_reg = (enable_reg | 0x0004);
	OUTREG16(amd_irongate_private.registers, AMD_GARTENABLE, enable_reg);

	/* Write out the size register: size bits 3:1 plus the enable
	 * bit 0 */
	pci_read_config_dword(agp_bridge.dev, AMD_APSIZE, &temp);
	temp = (((temp & ~(0x0000000e)) | current_size->size_value)
		| 0x00000001);
	pci_write_config_dword(agp_bridge.dev, AMD_APSIZE, temp);

	/* Flush the tlb */
	OUTREG32(amd_irongate_private.registers, AMD_TLBFLUSH, 0x00000001);

	return 0;
}
/*
 * Undo amd_irongate_configure(): disable the GART, restore the
 * previous aperture size (clearing the enable bit), and unmap the
 * MMIO registers.
 */
static void amd_irongate_cleanup(void)
{
	aper_size_info_lvl2 *previous_size;
	u32 temp;
	u16 enable_reg;

	previous_size = A_SIZE_LVL2(agp_bridge.previous_size);

	enable_reg = INREG16(amd_irongate_private.registers, AMD_GARTENABLE);
	enable_reg = (enable_reg & ~(0x0004));
	OUTREG16(amd_irongate_private.registers, AMD_GARTENABLE, enable_reg);

	/* Write back the previous size and disable gart translation */
	pci_read_config_dword(agp_bridge.dev, AMD_APSIZE, &temp);
	temp = ((temp & ~(0x0000000f)) | previous_size->size_value);
	pci_write_config_dword(agp_bridge.dev, AMD_APSIZE, temp);
	iounmap((void *) amd_irongate_private.registers);
}
/*
 * This routine could be implemented by taking the addresses
 * written to the GATT, and flushing them individually.  However
 * currently it just flushes the whole table.  Which is probably
 * more efficient, since agp_memory blocks can be a large number of
 * entries.  The 'temp' argument is therefore unused.
 */
static void amd_irongate_tlbflush(agp_memory * temp)
{
	OUTREG32(amd_irongate_private.registers, AMD_TLBFLUSH, 0x00000001);
}
1819 static unsigned long amd_irongate_mask_memory(unsigned long addr, int type)
1821 /* Only type 0 is supported by the irongate */
1823 return addr | agp_bridge.masks[0].mask;
/*
 * Bind 'mem' into the aperture starting at page 'pg_start'.  Only
 * memory type 0 is accepted.  Verifies the range is inside the
 * aperture and currently unbound before writing any entries; flushes
 * CPU caches once and the GART TLB at the end.
 * Returns 0, -EINVAL (bad type/range) or -EBUSY (range occupied).
 */
static int amd_insert_memory(agp_memory * mem,
			     off_t pg_start, int type)
{
	int i, j, num_entries;
	unsigned long *cur_gatt;
	unsigned long addr;

	num_entries = A_SIZE_LVL2(agp_bridge.current_size)->num_entries;

	if (type != 0 || mem->type != 0) {
		return -EINVAL;
	}
	if ((pg_start + mem->page_count) > num_entries) {
		return -EINVAL;
	}

	/* first pass: make sure every target entry is empty */
	j = pg_start;
	while (j < (pg_start + mem->page_count)) {
		addr = (j * PAGE_SIZE) + agp_bridge.gart_bus_addr;
		cur_gatt = GET_GATT(addr);
		if (!PGE_EMPTY(cur_gatt[GET_GATT_OFF(addr)])) {
			return -EBUSY;
		}
		j++;
	}

	if (mem->is_flushed == FALSE) {
		CACHE_FLUSH();
		mem->is_flushed = TRUE;
	}

	/* second pass: install the (pre-masked) entries */
	for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
		addr = (j * PAGE_SIZE) + agp_bridge.gart_bus_addr;
		cur_gatt = GET_GATT(addr);
		cur_gatt[GET_GATT_OFF(addr)] = mem->memory[i];
	}
	agp_bridge.tlb_flush(mem);
	return 0;
}
/*
 * Unbind 'mem' from the aperture: point each entry back at the
 * scratch page and flush the GART TLB.  Only type 0 is accepted.
 * Returns 0 or -EINVAL.
 */
static int amd_remove_memory(agp_memory * mem, off_t pg_start,
			     int type)
{
	int i;
	unsigned long *cur_gatt;
	unsigned long addr;

	if (type != 0 || mem->type != 0) {
		return -EINVAL;
	}
	for (i = pg_start; i < (mem->page_count + pg_start); i++) {
		addr = (i * PAGE_SIZE) + agp_bridge.gart_bus_addr;
		cur_gatt = GET_GATT(addr);
		cur_gatt[GET_GATT_OFF(addr)] =
		    (unsigned long) agp_bridge.scratch_page;
	}

	agp_bridge.tlb_flush(mem);
	return 0;
}
/* Aperture table; columns presumably {size MB, entries, APSIZE
 * size bits} per aper_size_info_lvl2 — TODO confirm. */
static aper_size_info_lvl2 amd_irongate_sizes[7] =
{
	{2048, 524288, 0x0000000c},
	{1024, 262144, 0x0000000a},
	{512, 131072, 0x00000008},
	{256, 65536, 0x00000006},
	{128, 32768, 0x00000004},
	{64, 16384, 0x00000002},
	{32, 8192, 0x00000000}
};

/* Irongate GATT entries carry a valid bit (bit 0); type 0 only. */
static gatt_mask amd_irongate_masks[] =
{
	{0x00000001, 0}
};
1903 static int __init amd_irongate_setup (struct pci_dev *pdev)
1905 agp_bridge.masks = amd_irongate_masks;
1906 agp_bridge.num_of_masks = 1;
1907 agp_bridge.aperture_sizes = (void *) amd_irongate_sizes;
1908 agp_bridge.size_type = LVL2_APER_SIZE;
1909 agp_bridge.num_aperture_sizes = 7;
1910 agp_bridge.dev_private_data = (void *) &amd_irongate_private;
1911 agp_bridge.needs_scratch_page = FALSE;
1912 agp_bridge.configure = amd_irongate_configure;
1913 agp_bridge.fetch_size = amd_irongate_fetch_size;
1914 agp_bridge.cleanup = amd_irongate_cleanup;
1915 agp_bridge.tlb_flush = amd_irongate_tlbflush;
1916 agp_bridge.mask_memory = amd_irongate_mask_memory;
1917 agp_bridge.agp_enable = agp_generic_agp_enable;
1918 agp_bridge.cache_flush = global_cache_flush;
1919 agp_bridge.create_gatt_table = amd_create_gatt_table;
1920 agp_bridge.free_gatt_table = amd_free_gatt_table;
1921 agp_bridge.insert_memory = amd_insert_memory;
1922 agp_bridge.remove_memory = amd_remove_memory;
1923 agp_bridge.alloc_by_type = agp_generic_alloc_by_type;
1924 agp_bridge.free_by_type = agp_generic_free_by_type;
1926 return 0;
1928 (void) pdev; /* unused */
1931 #endif /* CONFIG_AGP_AMD */
1933 #ifdef CONFIG_AGP_ALI
1935 static int ali_fetch_size(void)
1937 int i;
1938 u32 temp;
1939 aper_size_info_32 *values;
1941 pci_read_config_dword(agp_bridge.dev, ALI_ATTBASE, &temp);
1942 temp &= ~(0xfffffff0);
1943 values = A_SIZE_32(agp_bridge.aperture_sizes);
1945 for (i = 0; i < agp_bridge.num_aperture_sizes; i++) {
1946 if (temp == values[i].size_value) {
1947 agp_bridge.previous_size =
1948 agp_bridge.current_size = (void *) (values + i);
1949 agp_bridge.aperture_size_idx = i;
1950 return values[i].size;
1954 return 0;
/* Flush the ALi GART TLB by pulsing the flush value (0x90) in the low
 * byte of TLBCTRL, then restoring the normal value (0x10). */
static void ali_tlbflush(agp_memory * mem)
{
	u32 temp;

	pci_read_config_dword(agp_bridge.dev, ALI_TLBCTRL, &temp);
	pci_write_config_dword(agp_bridge.dev, ALI_TLBCTRL,
			       ((temp & 0xffffff00) | 0x00000090));
	pci_write_config_dword(agp_bridge.dev, ALI_TLBCTRL,
			       ((temp & 0xffffff00) | 0x00000010));
}
/* Undo ali_configure(): flush the TLB once more, then restore the
 * previous aperture size value into ATTBASE. */
static void ali_cleanup(void)
{
	aper_size_info_32 *previous_size;
	u32 temp;

	previous_size = A_SIZE_32(agp_bridge.previous_size);

	pci_read_config_dword(agp_bridge.dev, ALI_TLBCTRL, &temp);
	pci_write_config_dword(agp_bridge.dev, ALI_TLBCTRL,
			       ((temp & 0xffffff00) | 0x00000090));
	pci_write_config_dword(agp_bridge.dev, ALI_ATTBASE,
			       previous_size->size_value);
}
/*
 * Program the ALi bridge: ATTBASE carries both the GATT address and
 * the aperture size code in its low nibble; then enable the TLB and
 * record the aperture base.
 */
static int ali_configure(void)
{
	u32 temp;
	aper_size_info_32 *current_size;

	current_size = A_SIZE_32(agp_bridge.current_size);

	/* aperture size and gatt addr */
	pci_write_config_dword(agp_bridge.dev, ALI_ATTBASE,
		    agp_bridge.gatt_bus_addr | current_size->size_value);

	/* tlb control */
	pci_read_config_dword(agp_bridge.dev, ALI_TLBCTRL, &temp);
	pci_write_config_dword(agp_bridge.dev, ALI_TLBCTRL,
			       ((temp & 0xffffff00) | 0x00000010));

	/* address to map to */
	pci_read_config_dword(agp_bridge.dev, ALI_APBASE, &temp);
	agp_bridge.gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
	return 0;
}
2004 static unsigned long ali_mask_memory(unsigned long addr, int type)
2006 /* Memory type is ignored */
2008 return addr | agp_bridge.masks[0].mask;
/* Setup function */
/* ALi applies no extra bits to GATT entries (mask 0, type 0). */
static gatt_mask ali_generic_masks[] =
{
	{0x00000000, 0}
};

/* Aperture table; columns presumably {size MB, entries, page order,
 * ATTBASE size code} per aper_size_info_32 — TODO confirm. */
static aper_size_info_32 ali_generic_sizes[7] =
{
	{256, 65536, 6, 10},
	{128, 32768, 5, 9},
	{64, 16384, 4, 8},
	{32, 8192, 3, 7},
	{16, 4096, 2, 6},
	{8, 2048, 1, 4},
	{4, 1024, 0, 3}
};
2029 static int __init ali_generic_setup (struct pci_dev *pdev)
2031 agp_bridge.masks = ali_generic_masks;
2032 agp_bridge.num_of_masks = 1;
2033 agp_bridge.aperture_sizes = (void *) ali_generic_sizes;
2034 agp_bridge.size_type = U32_APER_SIZE;
2035 agp_bridge.num_aperture_sizes = 7;
2036 agp_bridge.dev_private_data = NULL;
2037 agp_bridge.needs_scratch_page = FALSE;
2038 agp_bridge.configure = ali_configure;
2039 agp_bridge.fetch_size = ali_fetch_size;
2040 agp_bridge.cleanup = ali_cleanup;
2041 agp_bridge.tlb_flush = ali_tlbflush;
2042 agp_bridge.mask_memory = ali_mask_memory;
2043 agp_bridge.agp_enable = agp_generic_agp_enable;
2044 agp_bridge.cache_flush = global_cache_flush;
2045 agp_bridge.create_gatt_table = agp_generic_create_gatt_table;
2046 agp_bridge.free_gatt_table = agp_generic_free_gatt_table;
2047 agp_bridge.insert_memory = agp_generic_insert_memory;
2048 agp_bridge.remove_memory = agp_generic_remove_memory;
2049 agp_bridge.alloc_by_type = agp_generic_alloc_by_type;
2050 agp_bridge.free_by_type = agp_generic_free_by_type;
2052 return 0;
2054 (void) pdev; /* unused */
2057 #endif /* CONFIG_AGP_ALI */
/* per-chipset initialization data.
 * note -- all chipsets for a single vendor MUST be grouped together,
 * because agp_lookup_host_bridge() scans forward from the first entry
 * matching the vendor id.  Each vendor's list ends with a device_id
 * of 0: the "generic" fallback used only with agp_try_unsupported=1.
 */
static struct {
	unsigned short device_id;	/* first, to make table easier to read */
	unsigned short vendor_id;
	enum chipset_type chipset;
	const char *vendor_name;
	const char *chipset_name;
	int (*chipset_setup) (struct pci_dev *pdev);
} agp_bridge_info[] __initdata = {

#ifdef CONFIG_AGP_ALI
	{ PCI_DEVICE_ID_AL_M1541_0, PCI_VENDOR_ID_AL, ALI_M1541,
	  "Ali", "M1541", ali_generic_setup },
	{ 0, PCI_VENDOR_ID_AL, ALI_GENERIC,
	  "Ali", "Generic", ali_generic_setup },
#endif /* CONFIG_AGP_ALI */

#ifdef CONFIG_AGP_AMD
	{ PCI_DEVICE_ID_AMD_IRONGATE_0, PCI_VENDOR_ID_AMD, AMD_IRONGATE,
	  "AMD", "Irongate", amd_irongate_setup },
	{ 0, PCI_VENDOR_ID_AMD, AMD_GENERIC,
	  "AMD", "Generic", amd_irongate_setup },
#endif /* CONFIG_AGP_AMD */

#ifdef CONFIG_AGP_INTEL
	{ PCI_DEVICE_ID_INTEL_82443LX_0, PCI_VENDOR_ID_INTEL, INTEL_LX,
	  "Intel", "440LX", intel_generic_setup },
	{ PCI_DEVICE_ID_INTEL_82443BX_0, PCI_VENDOR_ID_INTEL, INTEL_BX,
	  "Intel", "440BX", intel_generic_setup },
	{ PCI_DEVICE_ID_INTEL_82443GX_0, PCI_VENDOR_ID_INTEL, INTEL_GX,
	  "Intel", "440GX", intel_generic_setup },
	/* could we add support for PCI_DEVICE_ID_INTEL_815_1 too ? */
	{ PCI_DEVICE_ID_INTEL_815_0, PCI_VENDOR_ID_INTEL, INTEL_I815,
	  "Intel", "i815", intel_generic_setup },
	{ PCI_DEVICE_ID_INTEL_840_0, PCI_VENDOR_ID_INTEL, INTEL_I840,
	  "Intel", "i840", intel_840_setup },
	{ PCI_DEVICE_ID_INTEL_850_0, PCI_VENDOR_ID_INTEL, INTEL_I850,
	  "Intel", "i850", intel_850_setup },
	{ 0, PCI_VENDOR_ID_INTEL, INTEL_GENERIC,
	  "Intel", "Generic", intel_generic_setup },
#endif /* CONFIG_AGP_INTEL */

#ifdef CONFIG_AGP_SIS
	{ PCI_DEVICE_ID_SI_630, PCI_VENDOR_ID_SI, SIS_GENERIC,
	  "SiS", "630", sis_generic_setup },
	{ PCI_DEVICE_ID_SI_540, PCI_VENDOR_ID_SI, SIS_GENERIC,
	  "SiS", "540", sis_generic_setup },
	{ PCI_DEVICE_ID_SI_620, PCI_VENDOR_ID_SI, SIS_GENERIC,
	  "SiS", "620", sis_generic_setup },
	{ PCI_DEVICE_ID_SI_530, PCI_VENDOR_ID_SI, SIS_GENERIC,
	  "SiS", "530", sis_generic_setup },
	/* NOTE(review): the four "Generic"-named entries below repeat
	 * the device ids above; the first match wins, so these four are
	 * never reached — dead data, kept for fidelity. */
	{ PCI_DEVICE_ID_SI_630, PCI_VENDOR_ID_SI, SIS_GENERIC,
	  "SiS", "Generic", sis_generic_setup },
	{ PCI_DEVICE_ID_SI_540, PCI_VENDOR_ID_SI, SIS_GENERIC,
	  "SiS", "Generic", sis_generic_setup },
	{ PCI_DEVICE_ID_SI_620, PCI_VENDOR_ID_SI, SIS_GENERIC,
	  "SiS", "Generic", sis_generic_setup },
	{ PCI_DEVICE_ID_SI_530, PCI_VENDOR_ID_SI, SIS_GENERIC,
	  "SiS", "Generic", sis_generic_setup },
	{ 0, PCI_VENDOR_ID_SI, SIS_GENERIC,
	  "SiS", "Generic", sis_generic_setup },
#endif /* CONFIG_AGP_SIS */

#ifdef CONFIG_AGP_VIA
	{ PCI_DEVICE_ID_VIA_8501_0, PCI_VENDOR_ID_VIA, VIA_MVP4,
	  "Via", "MVP4", via_generic_setup },
	{ PCI_DEVICE_ID_VIA_82C597_0, PCI_VENDOR_ID_VIA, VIA_VP3,
	  "Via", "VP3", via_generic_setup },
	{ PCI_DEVICE_ID_VIA_82C598_0, PCI_VENDOR_ID_VIA, VIA_MVP3,
	  "Via", "MVP3", via_generic_setup },
	{ PCI_DEVICE_ID_VIA_82C691_0, PCI_VENDOR_ID_VIA, VIA_APOLLO_PRO,
	  "Via", "Apollo Pro", via_generic_setup },
	{ PCI_DEVICE_ID_VIA_8371_0, PCI_VENDOR_ID_VIA, VIA_APOLLO_KX133,
	  "Via", "Apollo Pro KX133", via_generic_setup },
	{ PCI_DEVICE_ID_VIA_8363_0, PCI_VENDOR_ID_VIA, VIA_APOLLO_KT133,
	  "Via", "Apollo Pro KT133", via_generic_setup },
	{ 0, PCI_VENDOR_ID_VIA, VIA_GENERIC,
	  "Via", "Generic", via_generic_setup },
#endif /* CONFIG_AGP_VIA */

	{ 0, }, /* dummy final entry, always present */
};
/* scan table above for supported devices */
/*
 * Match 'pdev' against agp_bridge_info and run the chipset setup.
 * Entries for one vendor are contiguous and end with a device_id of 0
 * (the vendor's "generic" fallback).  Returns the setup routine's
 * result, or -ENODEV.
 */
static int __init agp_lookup_host_bridge (struct pci_dev *pdev)
{
	int i;

	/* find the first entry for this vendor */
	for (i = 0; i < ARRAY_SIZE (agp_bridge_info); i++)
		if (pdev->vendor == agp_bridge_info[i].vendor_id)
			break;

	if (i >= ARRAY_SIZE (agp_bridge_info)) {
		printk (KERN_DEBUG PFX "unsupported bridge\n");
		return -ENODEV;
	}

	/* scan this vendor's entries for an exact device match */
	while ((i < ARRAY_SIZE (agp_bridge_info)) &&
	       (agp_bridge_info[i].vendor_id == pdev->vendor)) {
		if (pdev->device == agp_bridge_info[i].device_id) {
			printk (KERN_INFO PFX "Detected %s %s chipset\n",
				agp_bridge_info[i].vendor_name,
				agp_bridge_info[i].chipset_name);
			agp_bridge.type = agp_bridge_info[i].chipset;
			return agp_bridge_info[i].chipset_setup (pdev);
		}
		i++;
	}

	i--; /* point to vendor generic entry (device_id == 0) */

	/* try init anyway, if user requests it AND
	 * there is a 'generic' bridge entry for this vendor */
	if (agp_try_unsupported && agp_bridge_info[i].device_id == 0) {
		printk(KERN_WARNING PFX "Trying generic %s routines"
		       " for device id: %04x\n",
		       agp_bridge_info[i].vendor_name, pdev->device);
		agp_bridge.type = agp_bridge_info[i].chipset;
		return agp_bridge_info[i].chipset_setup (pdev);
	}

	printk(KERN_ERR PFX "Unsupported %s chipset (device id: %04x),"
	       " you might want to try agp_try_unsupported=1.\n",
	       agp_bridge_info[i].vendor_name, pdev->device);
	return -ENODEV;
}
/* Supported Device Scanning routine */
/*
 * Find the host bridge, handle the special integrated-graphics Intel
 * i810 family (which has no AGP capability and pairs a bridge device
 * with a secondary graphics device), then walk the PCI capability
 * list for the AGP capability (id 0x02) and dispatch to the chipset
 * table.  Returns 0 on success or a negative errno.
 */
static int __init agp_find_supported_device(void)
{
	struct pci_dev *dev = NULL;
	u8 cap_ptr = 0x00;
	u32 cap_id, scratch;

	if ((dev = pci_find_class(PCI_CLASS_BRIDGE_HOST << 8, NULL)) == NULL)
		return -ENODEV;

	agp_bridge.dev = dev;

	/* Need to test for I810 here */
#ifdef CONFIG_AGP_I810
	if (dev->vendor == PCI_VENDOR_ID_INTEL) {
		struct pci_dev *i810_dev;

		switch (dev->device) {
		case PCI_DEVICE_ID_INTEL_810_0:
			i810_dev = pci_find_device(PCI_VENDOR_ID_INTEL,
						   PCI_DEVICE_ID_INTEL_810_1,
						   NULL);
			if (i810_dev == NULL) {
				printk(KERN_ERR PFX "Detected an Intel i810,"
				       " but could not find the secondary"
				       " device.\n");
				return -ENODEV;
			}
			printk(KERN_INFO PFX "Detected an Intel "
			       "i810 Chipset.\n");
			agp_bridge.type = INTEL_I810;
			return intel_i810_setup (i810_dev);

		case PCI_DEVICE_ID_INTEL_810_DC100_0:
			i810_dev = pci_find_device(PCI_VENDOR_ID_INTEL,
					   PCI_DEVICE_ID_INTEL_810_DC100_1,
					   NULL);
			if (i810_dev == NULL) {
				printk(KERN_ERR PFX "Detected an Intel i810 "
				       "DC100, but could not find the "
				       "secondary device.\n");
				return -ENODEV;
			}
			printk(KERN_INFO PFX "Detected an Intel i810 "
			       "DC100 Chipset.\n");
			agp_bridge.type = INTEL_I810;
			return intel_i810_setup(i810_dev);

		case PCI_DEVICE_ID_INTEL_810_E_0:
			i810_dev = pci_find_device(PCI_VENDOR_ID_INTEL,
						   PCI_DEVICE_ID_INTEL_810_E_1,
						   NULL);
			if (i810_dev == NULL) {
				printk(KERN_ERR PFX "Detected an Intel i810 E"
				       ", but could not find the secondary "
				       "device.\n");
				return -ENODEV;
			}
			printk(KERN_INFO PFX "Detected an Intel i810 E "
			       "Chipset.\n");
			agp_bridge.type = INTEL_I810;
			return intel_i810_setup(i810_dev);

		case PCI_DEVICE_ID_INTEL_815_0:
			/* The i815 can operate either as an i810 style
			 * integrated device, or as an AGP4X motherboard.
			 *
			 * This only addresses the first mode:
			 */
			i810_dev = pci_find_device(PCI_VENDOR_ID_INTEL,
						   PCI_DEVICE_ID_INTEL_815_1,
						   NULL);
			if (i810_dev == NULL) {
				printk(KERN_ERR PFX "agpgart: Detected an "
				       "Intel i815, but could not find the"
				       " secondary device.\n");
				agp_bridge.type = NOT_SUPPORTED;
				return -ENODEV;
			}
			printk(KERN_INFO PFX "agpgart: Detected an Intel i815 "
			       "Chipset.\n");
			agp_bridge.type = INTEL_I810;
			return intel_i810_setup(i810_dev);

		default:
			break;
		}
	}
#endif /* CONFIG_AGP_I810 */

	/* find capndx: require the capabilities-list status bit, then
	 * walk the list looking for capability id 0x02 (AGP) */
	pci_read_config_dword(dev, 0x04, &scratch);
	if (!(scratch & 0x00100000))
		return -ENODEV;

	pci_read_config_byte(dev, 0x34, &cap_ptr);
	if (cap_ptr != 0x00) {
		do {
			pci_read_config_dword(dev, cap_ptr, &cap_id);

			if ((cap_id & 0xff) != 0x02)
				cap_ptr = (cap_id >> 8) & 0xff;
		}
		while (((cap_id & 0xff) != 0x02) && (cap_ptr != 0x00));
	}
	if (cap_ptr == 0x00)
		return -ENODEV;
	agp_bridge.capndx = cap_ptr;

	/* Fill in the mode register */
	pci_read_config_dword(agp_bridge.dev,
			      agp_bridge.capndx + 4,
			      &agp_bridge.mode);

	/* probe for known chipsets */
	return agp_lookup_host_bridge (dev);
}
/* One interpolation point: for a system with 'mem' MB of RAM, allow
 * at most 'agp' MB to be used as AGP memory. */
struct agp_max_table {
	int mem;
	int agp;
};

/* Piecewise-linear cap on AGP memory vs. installed RAM; consumed by
 * agp_find_max() via interpolation between adjacent rows. */
static struct agp_max_table maxes_table[9] __initdata =
{
	{0, 0},
	{32, 4},
	{64, 28},
	{128, 96},
	{256, 204},
	{512, 440},
	{1024, 942},
	{2048, 1920},
	{4096, 3932}
};
/*
 * Decide how much main memory agpgart may consume.
 *
 * The amount of installed memory (in MB, approximated from
 * high_memory) is located in maxes_table[] and the limit is linearly
 * interpolated between the two surrounding rows.
 *
 * Returns the limit in pages (the MB figure shifted by
 * 20 - PAGE_SHIFT).
 */
static int __init agp_find_max (void)
{
	long memory, index, result;

	/* top of directly-mapped memory, converted to megabytes */
	memory = virt_to_phys(high_memory) >> 20;
	index = 1;

	/* find first row with .mem >= memory, clamped to the last row;
	 * index starts at 1 so index - 1 below is always valid */
	while ((memory > maxes_table[index].mem) &&
	       (index < 8)) {
		index++;
	}

	/* linear interpolation between rows index - 1 and index */
	result = maxes_table[index - 1].agp +
	   ( (memory - maxes_table[index - 1].mem)  *
	     (maxes_table[index].agp - maxes_table[index - 1].agp)) /
	   (maxes_table[index].mem - maxes_table[index - 1].mem);

	printk(KERN_INFO PFX "Maximum main memory to use "
	       "for agp memory: %ldM\n", result);
	result = result << (20 - PAGE_SHIFT);	/* MB -> pages */
	return result;
}
#define AGPGART_VERSION_MAJOR 0
#define AGPGART_VERSION_MINOR 99

/* Driver version reported to userspace via agp_bridge.version. */
static agp_version agp_current_version =
{
	AGPGART_VERSION_MAJOR,
	AGPGART_VERSION_MINOR
};
2469 static int __init agp_backend_initialize(void)
2471 int size_value, rc, got_gatt=0, got_keylist=0;
2473 memset(&agp_bridge, 0, sizeof(struct agp_bridge_data));
2474 agp_bridge.type = NOT_SUPPORTED;
2475 agp_bridge.max_memory_agp = agp_find_max();
2476 agp_bridge.version = &agp_current_version;
2478 rc = agp_find_supported_device();
2479 if (rc) {
2480 /* not KERN_ERR because error msg should have already printed */
2481 printk(KERN_DEBUG PFX "no supported devices found.\n");
2482 return rc;
2485 if (agp_bridge.needs_scratch_page == TRUE) {
2486 agp_bridge.scratch_page = agp_alloc_page();
2488 if (agp_bridge.scratch_page == 0) {
2489 printk(KERN_ERR PFX "unable to get memory for "
2490 "scratch page.\n");
2491 return -ENOMEM;
2493 agp_bridge.scratch_page =
2494 virt_to_phys((void *) agp_bridge.scratch_page);
2495 agp_bridge.scratch_page =
2496 agp_bridge.mask_memory(agp_bridge.scratch_page, 0);
2499 size_value = agp_bridge.fetch_size();
2501 if (size_value == 0) {
2502 printk(KERN_ERR PFX "unable to detrimine aperture size.\n");
2503 rc = -EINVAL;
2504 goto err_out;
2506 if (agp_bridge.create_gatt_table()) {
2507 printk(KERN_ERR PFX "unable to get memory for graphics "
2508 "translation table.\n");
2509 rc = -ENOMEM;
2510 goto err_out;
2512 got_gatt = 1;
2514 agp_bridge.key_list = vmalloc(PAGE_SIZE * 4);
2515 if (agp_bridge.key_list == NULL) {
2516 printk(KERN_ERR PFX "error allocating memory for key lists.\n");
2517 rc = -ENOMEM;
2518 goto err_out;
2520 got_keylist = 1;
2522 /* FIXME vmalloc'd memory not guaranteed contiguous */
2523 memset(agp_bridge.key_list, 0, PAGE_SIZE * 4);
2525 if (agp_bridge.configure()) {
2526 printk(KERN_ERR PFX "error configuring host chipset.\n");
2527 rc = -EINVAL;
2528 goto err_out;
2531 printk(KERN_INFO PFX "AGP aperture is %dM @ 0x%lx\n",
2532 size_value, agp_bridge.gart_bus_addr);
2534 return 0;
2536 err_out:
2537 if (agp_bridge.needs_scratch_page == TRUE) {
2538 agp_bridge.scratch_page &= ~(0x00000fff);
2539 agp_destroy_page((unsigned long)
2540 phys_to_virt(agp_bridge.scratch_page));
2542 if (got_gatt)
2543 agp_bridge.free_gatt_table();
2544 if (got_keylist)
2545 vfree(agp_bridge.key_list);
2546 return rc;
/* cannot be __exit b/c as it could be called from __init code */
/*
 * Undo agp_backend_initialize(): let the chipset driver clean up,
 * release the GATT and key list, then free the scratch page (after
 * masking off the low attribute bits that mask_memory() set, to
 * recover the real page address).
 */
static void agp_backend_cleanup(void)
{
	agp_bridge.cleanup();
	agp_bridge.free_gatt_table();
	vfree(agp_bridge.key_list);

	if (agp_bridge.needs_scratch_page == TRUE) {
		agp_bridge.scratch_page &= ~(0x00000fff);
		agp_destroy_page((unsigned long)
				 phys_to_virt(agp_bridge.scratch_page));
	}
}
2564 extern int agp_frontend_initialize(void);
2565 extern void agp_frontend_cleanup(void);
/*
 * Operations published to the DRM layer through
 * inter_module_register("drm_agp", ...) so DRM drivers can use
 * agpgart without a hard symbol dependency.  The positional
 * initializer order must match the drm_agp_t definition.
 */
static const drm_agp_t drm_agp = {
	&agp_free_memory,
	&agp_allocate_memory,
	&agp_bind_memory,
	&agp_unbind_memory,
	&agp_enable,
	&agp_backend_acquire,
	&agp_backend_release,
	&agp_copy_info
};
/*
 * Module entry point: initialize the backend (chipset detection and
 * programming), then the userspace frontend, and finally publish the
 * drm_agp operation table.  On failure the bridge type is reset to
 * NOT_SUPPORTED and any already-initialized backend is torn down.
 */
static int __init agp_init(void)
{
	int ret_val;

	printk(KERN_INFO "Linux agpgart interface v%d.%d (c) Jeff Hartmann\n",
	       AGPGART_VERSION_MAJOR, AGPGART_VERSION_MINOR);

	ret_val = agp_backend_initialize();
	if (ret_val) {
		agp_bridge.type = NOT_SUPPORTED;
		return ret_val;
	}
	ret_val = agp_frontend_initialize();
	if (ret_val) {
		agp_bridge.type = NOT_SUPPORTED;
		agp_backend_cleanup();
		return ret_val;
	}

	inter_module_register("drm_agp", THIS_MODULE, &drm_agp);
	return 0;
}
/*
 * Module exit point: tear down the frontend first (stops userspace
 * access), then the backend, then withdraw the DRM registration.
 */
static void __exit agp_cleanup(void)
{
	agp_frontend_cleanup();
	agp_backend_cleanup();
	inter_module_unregister("drm_agp");
}
2608 module_init(agp_init);
2609 module_exit(agp_cleanup);