Linux 2.4.0-test7-pre6
[davej-history.git] / drivers / char / agp / agpgart_be.c
blob44bd3bfb471b02d8a95a3c17c7fe21f4025c4ef4
1 /*
2 * AGPGART module version 0.99
3 * Copyright (C) 1999 Jeff Hartmann
4 * Copyright (C) 1999 Precision Insight, Inc.
5 * Copyright (C) 1999 Xi Graphics, Inc.
7 * Permission is hereby granted, free of charge, to any person obtaining a
8 * copy of this software and associated documentation files (the "Software"),
9 * to deal in the Software without restriction, including without limitation
10 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
11 * and/or sell copies of the Software, and to permit persons to whom the
12 * Software is furnished to do so, subject to the following conditions:
14 * The above copyright notice and this permission notice shall be included
15 * in all copies or substantial portions of the Software.
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
18 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * JEFF HARTMANN, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM,
21 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
22 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
23 * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26 #include <linux/config.h>
27 #include <linux/version.h>
28 #include <linux/module.h>
29 #include <linux/types.h>
30 #include <linux/kernel.h>
31 #include <linux/sched.h>
32 #include <linux/mm.h>
33 #include <linux/string.h>
34 #include <linux/errno.h>
35 #include <linux/malloc.h>
36 #include <linux/vmalloc.h>
37 #include <linux/pci.h>
38 #include <linux/init.h>
39 #include <linux/pagemap.h>
40 #include <linux/miscdevice.h>
41 #include <asm/system.h>
42 #include <asm/uaccess.h>
43 #include <asm/io.h>
44 #include <asm/page.h>
46 #include <linux/agp_backend.h>
47 #include "agp.h"
49 MODULE_AUTHOR("Jeff Hartmann <jhartmann@precisioninsight.com>");
50 MODULE_PARM(agp_try_unsupported, "1i");
51 EXPORT_SYMBOL(agp_free_memory);
52 EXPORT_SYMBOL(agp_allocate_memory);
53 EXPORT_SYMBOL(agp_copy_info);
54 EXPORT_SYMBOL(agp_bind_memory);
55 EXPORT_SYMBOL(agp_unbind_memory);
56 EXPORT_SYMBOL(agp_enable);
57 EXPORT_SYMBOL(agp_backend_acquire);
58 EXPORT_SYMBOL(agp_backend_release);
60 static void flush_cache(void);
62 static struct agp_bridge_data agp_bridge;
63 static int agp_try_unsupported __initdata = 0;
66 static inline void flush_cache(void)
68 #if defined(__i386__)
69 asm volatile ("wbinvd":::"memory");
70 #elif defined(__alpha__) || defined(__ia64__)
71 /* ??? I wonder if we'll really need to flush caches, or if the
72 core logic can manage to keep the system coherent. The ARM
73 speaks only of using `cflush' to get things in memory in
74 preparation for power failure.
76 If we do need to call `cflush', we'll need a target page,
77 as we can only flush one page at a time.
79 Ditto for IA-64. --davidm 00/08/07 */
80 mb();
81 #else
82 #error "Please define flush_cache."
83 #endif
#ifdef CONFIG_SMP
static atomic_t cpus_waiting;

/* IPI target: flush the local cache, then spin until every CPU has. */
static void ipi_handler(void *null)
{
	flush_cache();
	atomic_dec(&cpus_waiting);
	while (atomic_read(&cpus_waiting) > 0)
		barrier();
}

/* Flush caches on all CPUs; the IPI handlers and this CPU rendezvous
 * on cpus_waiting so nobody proceeds with a stale cache. */
static void smp_flush_cache(void)
{
	atomic_set(&cpus_waiting, smp_num_cpus - 1);
	if (smp_call_function(ipi_handler, NULL, 1, 0) != 0)
		panic(PFX "timed out waiting for the other CPUs!\n");
	flush_cache();
	while (atomic_read(&cpus_waiting) > 0)
		barrier();
}
#define global_cache_flush smp_flush_cache
#else				/* CONFIG_SMP */
#define global_cache_flush flush_cache
#endif				/* CONFIG_SMP */
111 int agp_backend_acquire(void)
113 if (agp_bridge.type == NOT_SUPPORTED) {
114 return -EINVAL;
116 atomic_inc(&agp_bridge.agp_in_use);
118 if (atomic_read(&agp_bridge.agp_in_use) != 1) {
119 atomic_dec(&agp_bridge.agp_in_use);
120 return -EBUSY;
122 MOD_INC_USE_COUNT;
123 return 0;
126 void agp_backend_release(void)
128 if (agp_bridge.type == NOT_SUPPORTED) {
129 return;
131 atomic_dec(&agp_bridge.agp_in_use);
132 MOD_DEC_USE_COUNT;
136 * Basic Page Allocation Routines -
137 * These routines handle page allocation
138 * and by default they reserve the allocated
139 * memory. They also handle incrementing the
140 * current_memory_agp value, Which is checked
141 * against a maximum value.
144 static unsigned long agp_alloc_page(void)
146 void *pt;
148 pt = (void *) __get_free_page(GFP_KERNEL);
149 if (pt == NULL) {
150 return 0;
152 atomic_inc(&virt_to_page(pt)->count);
153 set_bit(PG_locked, &virt_to_page(pt)->flags);
154 atomic_inc(&agp_bridge.current_memory_agp);
155 return (unsigned long) pt;
158 static void agp_destroy_page(unsigned long page)
160 void *pt = (void *) page;
162 if (pt == NULL) {
163 return;
165 atomic_dec(&virt_to_page(pt)->count);
166 clear_bit(PG_locked, &virt_to_page(pt)->flags);
167 wake_up(&virt_to_page(pt)->wait);
168 free_page((unsigned long) pt);
169 atomic_dec(&agp_bridge.current_memory_agp);
172 /* End Basic Page Allocation Routines */
175 * Generic routines for handling agp_memory structures -
176 * They use the basic page allocation routines to do the
177 * brunt of the work.
181 static void agp_free_key(int key)
184 if (key < 0) {
185 return;
187 if (key < MAXKEY) {
188 clear_bit(key, agp_bridge.key_list);
192 static int agp_get_key(void)
194 int bit;
196 bit = find_first_zero_bit(agp_bridge.key_list, MAXKEY);
197 if (bit < MAXKEY) {
198 set_bit(bit, agp_bridge.key_list);
199 return bit;
201 return -1;
204 static agp_memory *agp_create_memory(int scratch_pages)
206 agp_memory *new;
208 new = kmalloc(sizeof(agp_memory), GFP_KERNEL);
210 if (new == NULL) {
211 return NULL;
213 memset(new, 0, sizeof(agp_memory));
214 new->key = agp_get_key();
216 if (new->key < 0) {
217 kfree(new);
218 return NULL;
220 new->memory = vmalloc(PAGE_SIZE * scratch_pages);
222 if (new->memory == NULL) {
223 agp_free_key(new->key);
224 kfree(new);
225 return NULL;
227 new->num_scratch_pages = scratch_pages;
228 return new;
231 void agp_free_memory(agp_memory * curr)
233 int i;
235 if ((agp_bridge.type == NOT_SUPPORTED) || (curr == NULL)) {
236 return;
238 if (curr->is_bound == TRUE) {
239 agp_unbind_memory(curr);
241 if (curr->type != 0) {
242 agp_bridge.free_by_type(curr);
243 return;
245 if (curr->page_count != 0) {
246 for (i = 0; i < curr->page_count; i++) {
247 curr->memory[i] &= ~(0x00000fff);
248 agp_destroy_page((unsigned long)
249 phys_to_virt(curr->memory[i]));
252 agp_free_key(curr->key);
253 vfree(curr->memory);
254 kfree(curr);
255 MOD_DEC_USE_COUNT;
258 #define ENTRIES_PER_PAGE (PAGE_SIZE / sizeof(unsigned long))
260 agp_memory *agp_allocate_memory(size_t page_count, u32 type)
262 int scratch_pages;
263 agp_memory *new;
264 int i;
266 if (agp_bridge.type == NOT_SUPPORTED) {
267 return NULL;
269 if ((atomic_read(&agp_bridge.current_memory_agp) + page_count) >
270 agp_bridge.max_memory_agp) {
271 return NULL;
274 if (type != 0) {
275 new = agp_bridge.alloc_by_type(page_count, type);
276 return new;
278 /* We always increase the module count, since free auto-decrements
279 * it
282 MOD_INC_USE_COUNT;
284 scratch_pages = (page_count + ENTRIES_PER_PAGE - 1) / ENTRIES_PER_PAGE;
286 new = agp_create_memory(scratch_pages);
288 if (new == NULL) {
289 MOD_DEC_USE_COUNT;
290 return NULL;
292 for (i = 0; i < page_count; i++) {
293 new->memory[i] = agp_alloc_page();
295 if (new->memory[i] == 0) {
296 /* Free this structure */
297 agp_free_memory(new);
298 return NULL;
300 new->memory[i] =
301 agp_bridge.mask_memory(
302 virt_to_phys((void *) new->memory[i]),
303 type);
304 new->page_count++;
307 return new;
310 /* End - Generic routines for handling agp_memory structures */
312 static int agp_return_size(void)
314 int current_size;
315 void *temp;
317 temp = agp_bridge.current_size;
319 switch (agp_bridge.size_type) {
320 case U8_APER_SIZE:
321 current_size = A_SIZE_8(temp)->size;
322 break;
323 case U16_APER_SIZE:
324 current_size = A_SIZE_16(temp)->size;
325 break;
326 case U32_APER_SIZE:
327 current_size = A_SIZE_32(temp)->size;
328 break;
329 case LVL2_APER_SIZE:
330 current_size = A_SIZE_LVL2(temp)->size;
331 break;
332 case FIXED_APER_SIZE:
333 current_size = A_SIZE_FIX(temp)->size;
334 break;
335 default:
336 current_size = 0;
337 break;
340 return current_size;
343 /* Routine to copy over information structure */
345 void agp_copy_info(agp_kern_info * info)
347 memset(info, 0, sizeof(agp_kern_info));
348 if (agp_bridge.type == NOT_SUPPORTED) {
349 info->chipset = agp_bridge.type;
350 return;
352 info->version.major = agp_bridge.version->major;
353 info->version.minor = agp_bridge.version->minor;
354 info->device = agp_bridge.dev;
355 info->chipset = agp_bridge.type;
356 info->mode = agp_bridge.mode;
357 info->aper_base = agp_bridge.gart_bus_addr;
358 info->aper_size = agp_return_size();
359 info->max_memory = agp_bridge.max_memory_agp;
360 info->current_memory = atomic_read(&agp_bridge.current_memory_agp);
363 /* End - Routine to copy over information structure */
366 * Routines for handling swapping of agp_memory into the GATT -
367 * These routines take agp_memory and insert them into the GATT.
368 * They call device specific routines to actually write to the GATT.
371 int agp_bind_memory(agp_memory * curr, off_t pg_start)
373 int ret_val;
375 if ((agp_bridge.type == NOT_SUPPORTED) ||
376 (curr == NULL) || (curr->is_bound == TRUE)) {
377 return -EINVAL;
379 if (curr->is_flushed == FALSE) {
380 CACHE_FLUSH();
381 curr->is_flushed = TRUE;
383 ret_val = agp_bridge.insert_memory(curr, pg_start, curr->type);
385 if (ret_val != 0) {
386 return ret_val;
388 curr->is_bound = TRUE;
389 curr->pg_start = pg_start;
390 return 0;
393 int agp_unbind_memory(agp_memory * curr)
395 int ret_val;
397 if ((agp_bridge.type == NOT_SUPPORTED) || (curr == NULL)) {
398 return -EINVAL;
400 if (curr->is_bound != TRUE) {
401 return -EINVAL;
403 ret_val = agp_bridge.remove_memory(curr, curr->pg_start, curr->type);
405 if (ret_val != 0) {
406 return ret_val;
408 curr->is_bound = FALSE;
409 curr->pg_start = 0;
410 return 0;
413 /* End - Routines for handling swapping of agp_memory into the GATT */
416 * Driver routines - start
417 * Currently this module supports the following chipsets:
418 * i810, 440lx, 440bx, 440gx, via vp3, via mvp3, via kx133, via kt133,
419 * amd irongate, ALi M1541, and generic support for the SiS chipsets.
422 /* Generic Agp routines - Start */
424 static void agp_generic_agp_enable(u32 mode)
426 struct pci_dev *device = NULL;
427 u32 command, scratch, cap_id;
428 u8 cap_ptr;
430 pci_read_config_dword(agp_bridge.dev,
431 agp_bridge.capndx + 4,
432 &command);
435 * PASS1: go throu all devices that claim to be
436 * AGP devices and collect their data.
439 while ((device = pci_find_class(PCI_CLASS_DISPLAY_VGA << 8,
440 device)) != NULL) {
441 pci_read_config_dword(device, 0x04, &scratch);
443 if (!(scratch & 0x00100000))
444 continue;
446 pci_read_config_byte(device, 0x34, &cap_ptr);
448 if (cap_ptr != 0x00) {
449 do {
450 pci_read_config_dword(device,
451 cap_ptr, &cap_id);
453 if ((cap_id & 0xff) != 0x02)
454 cap_ptr = (cap_id >> 8) & 0xff;
456 while (((cap_id & 0xff) != 0x02) && (cap_ptr != 0x00));
458 if (cap_ptr != 0x00) {
460 * Ok, here we have a AGP device. Disable impossible
461 * settings, and adjust the readqueue to the minimum.
464 pci_read_config_dword(device, cap_ptr + 4, &scratch);
466 /* adjust RQ depth */
467 command =
468 ((command & ~0xff000000) |
469 min((mode & 0xff000000),
470 min((command & 0xff000000),
471 (scratch & 0xff000000))));
473 /* disable SBA if it's not supported */
474 if (!((command & 0x00000200) &&
475 (scratch & 0x00000200) &&
476 (mode & 0x00000200)))
477 command &= ~0x00000200;
479 /* disable FW if it's not supported */
480 if (!((command & 0x00000010) &&
481 (scratch & 0x00000010) &&
482 (mode & 0x00000010)))
483 command &= ~0x00000010;
485 if (!((command & 4) &&
486 (scratch & 4) &&
487 (mode & 4)))
488 command &= ~0x00000004;
490 if (!((command & 2) &&
491 (scratch & 2) &&
492 (mode & 2)))
493 command &= ~0x00000002;
495 if (!((command & 1) &&
496 (scratch & 1) &&
497 (mode & 1)))
498 command &= ~0x00000001;
502 * PASS2: Figure out the 4X/2X/1X setting and enable the
503 * target (our motherboard chipset).
506 if (command & 4) {
507 command &= ~3; /* 4X */
509 if (command & 2) {
510 command &= ~5; /* 2X */
512 if (command & 1) {
513 command &= ~6; /* 1X */
515 command |= 0x00000100;
517 pci_write_config_dword(agp_bridge.dev,
518 agp_bridge.capndx + 8,
519 command);
522 * PASS3: Go throu all AGP devices and update the
523 * command registers.
526 while ((device = pci_find_class(PCI_CLASS_DISPLAY_VGA << 8,
527 device)) != NULL) {
528 pci_read_config_dword(device, 0x04, &scratch);
530 if (!(scratch & 0x00100000))
531 continue;
533 pci_read_config_byte(device, 0x34, &cap_ptr);
535 if (cap_ptr != 0x00) {
536 do {
537 pci_read_config_dword(device,
538 cap_ptr, &cap_id);
540 if ((cap_id & 0xff) != 0x02)
541 cap_ptr = (cap_id >> 8) & 0xff;
543 while (((cap_id & 0xff) != 0x02) && (cap_ptr != 0x00));
545 if (cap_ptr != 0x00)
546 pci_write_config_dword(device, cap_ptr + 8, command);
550 static int agp_generic_create_gatt_table(void)
552 char *table;
553 char *table_end;
554 int size;
555 int page_order;
556 int num_entries;
557 int i;
558 void *temp;
559 struct page *page;
561 /* The generic routines can't handle 2 level gatt's */
562 if (agp_bridge.size_type == LVL2_APER_SIZE) {
563 return -EINVAL;
566 table = NULL;
567 i = agp_bridge.aperture_size_idx;
568 temp = agp_bridge.current_size;
569 size = page_order = num_entries = 0;
571 if (agp_bridge.size_type != FIXED_APER_SIZE) {
572 do {
573 switch (agp_bridge.size_type) {
574 case U8_APER_SIZE:
575 size = A_SIZE_8(temp)->size;
576 page_order =
577 A_SIZE_8(temp)->page_order;
578 num_entries =
579 A_SIZE_8(temp)->num_entries;
580 break;
581 case U16_APER_SIZE:
582 size = A_SIZE_16(temp)->size;
583 page_order = A_SIZE_16(temp)->page_order;
584 num_entries = A_SIZE_16(temp)->num_entries;
585 break;
586 case U32_APER_SIZE:
587 size = A_SIZE_32(temp)->size;
588 page_order = A_SIZE_32(temp)->page_order;
589 num_entries = A_SIZE_32(temp)->num_entries;
590 break;
591 /* This case will never really happen. */
592 case FIXED_APER_SIZE:
593 case LVL2_APER_SIZE:
594 default:
595 size = page_order = num_entries = 0;
596 break;
599 table = (char *) __get_free_pages(GFP_KERNEL,
600 page_order);
602 if (table == NULL) {
603 i++;
604 switch (agp_bridge.size_type) {
605 case U8_APER_SIZE:
606 agp_bridge.current_size = A_IDX8();
607 break;
608 case U16_APER_SIZE:
609 agp_bridge.current_size = A_IDX16();
610 break;
611 case U32_APER_SIZE:
612 agp_bridge.current_size = A_IDX32();
613 break;
614 /* This case will never really
615 * happen.
617 case FIXED_APER_SIZE:
618 case LVL2_APER_SIZE:
619 default:
620 agp_bridge.current_size =
621 agp_bridge.current_size;
622 break;
624 } else {
625 agp_bridge.aperture_size_idx = i;
627 } while ((table == NULL) &&
628 (i < agp_bridge.num_aperture_sizes));
629 } else {
630 size = ((aper_size_info_fixed *) temp)->size;
631 page_order = ((aper_size_info_fixed *) temp)->page_order;
632 num_entries = ((aper_size_info_fixed *) temp)->num_entries;
633 table = (char *) __get_free_pages(GFP_KERNEL, page_order);
636 if (table == NULL) {
637 return -ENOMEM;
639 table_end = table + ((PAGE_SIZE * (1 << page_order)) - 1);
641 for (page = virt_to_page(table); page <= virt_to_page(table_end); page++)
642 set_bit(PG_reserved, &page->flags);
644 agp_bridge.gatt_table_real = (unsigned long *) table;
645 CACHE_FLUSH();
646 agp_bridge.gatt_table = ioremap_nocache(virt_to_phys(table),
647 (PAGE_SIZE * (1 << page_order)));
648 CACHE_FLUSH();
650 if (agp_bridge.gatt_table == NULL) {
651 for (page = virt_to_page(table); page <= virt_to_page(table_end); page++)
652 clear_bit(PG_reserved, &page->flags);
654 free_pages((unsigned long) table, page_order);
656 return -ENOMEM;
658 agp_bridge.gatt_bus_addr = virt_to_phys(agp_bridge.gatt_table_real);
660 for (i = 0; i < num_entries; i++) {
661 agp_bridge.gatt_table[i] =
662 (unsigned long) agp_bridge.scratch_page;
665 return 0;
668 static int agp_generic_free_gatt_table(void)
670 int page_order;
671 char *table, *table_end;
672 void *temp;
673 struct page *page;
675 temp = agp_bridge.current_size;
677 switch (agp_bridge.size_type) {
678 case U8_APER_SIZE:
679 page_order = A_SIZE_8(temp)->page_order;
680 break;
681 case U16_APER_SIZE:
682 page_order = A_SIZE_16(temp)->page_order;
683 break;
684 case U32_APER_SIZE:
685 page_order = A_SIZE_32(temp)->page_order;
686 break;
687 case FIXED_APER_SIZE:
688 page_order = A_SIZE_FIX(temp)->page_order;
689 break;
690 case LVL2_APER_SIZE:
691 /* The generic routines can't deal with 2 level gatt's */
692 return -EINVAL;
693 break;
694 default:
695 page_order = 0;
696 break;
699 /* Do not worry about freeing memory, because if this is
700 * called, then all agp memory is deallocated and removed
701 * from the table.
704 iounmap(agp_bridge.gatt_table);
705 table = (char *) agp_bridge.gatt_table_real;
706 table_end = table + ((PAGE_SIZE * (1 << page_order)) - 1);
708 for (page = virt_to_page(table); page <= virt_to_page(table_end); page++)
709 clear_bit(PG_reserved, &page->flags);
711 free_pages((unsigned long) agp_bridge.gatt_table_real, page_order);
712 return 0;
715 static int agp_generic_insert_memory(agp_memory * mem,
716 off_t pg_start, int type)
718 int i, j, num_entries;
719 void *temp;
721 temp = agp_bridge.current_size;
723 switch (agp_bridge.size_type) {
724 case U8_APER_SIZE:
725 num_entries = A_SIZE_8(temp)->num_entries;
726 break;
727 case U16_APER_SIZE:
728 num_entries = A_SIZE_16(temp)->num_entries;
729 break;
730 case U32_APER_SIZE:
731 num_entries = A_SIZE_32(temp)->num_entries;
732 break;
733 case FIXED_APER_SIZE:
734 num_entries = A_SIZE_FIX(temp)->num_entries;
735 break;
736 case LVL2_APER_SIZE:
737 /* The generic routines can't deal with 2 level gatt's */
738 return -EINVAL;
739 break;
740 default:
741 num_entries = 0;
742 break;
745 if (type != 0 || mem->type != 0) {
746 /* The generic routines know nothing of memory types */
747 return -EINVAL;
749 if ((pg_start + mem->page_count) > num_entries) {
750 return -EINVAL;
752 j = pg_start;
754 while (j < (pg_start + mem->page_count)) {
755 if (!PGE_EMPTY(agp_bridge.gatt_table[j])) {
756 return -EBUSY;
758 j++;
761 if (mem->is_flushed == FALSE) {
762 CACHE_FLUSH();
763 mem->is_flushed = TRUE;
765 for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
766 agp_bridge.gatt_table[j] = mem->memory[i];
769 agp_bridge.tlb_flush(mem);
770 return 0;
773 static int agp_generic_remove_memory(agp_memory * mem, off_t pg_start,
774 int type)
776 int i;
778 if (type != 0 || mem->type != 0) {
779 /* The generic routines know nothing of memory types */
780 return -EINVAL;
782 for (i = pg_start; i < (mem->page_count + pg_start); i++) {
783 agp_bridge.gatt_table[i] =
784 (unsigned long) agp_bridge.scratch_page;
787 agp_bridge.tlb_flush(mem);
788 return 0;
791 static agp_memory *agp_generic_alloc_by_type(size_t page_count, int type)
793 return NULL;
796 static void agp_generic_free_by_type(agp_memory * curr)
798 if (curr->memory != NULL) {
799 vfree(curr->memory);
801 agp_free_key(curr->key);
802 kfree(curr);
805 void agp_enable(u32 mode)
807 if (agp_bridge.type == NOT_SUPPORTED) return;
808 agp_bridge.agp_enable(mode);
811 /* End - Generic Agp routines */
813 #ifdef CONFIG_AGP_I810
814 static aper_size_info_fixed intel_i810_sizes[] =
816 {64, 16384, 4},
817 /* The 32M mode still requires a 64k gatt */
818 {32, 8192, 4}
821 #define AGP_DCACHE_MEMORY 1
822 #define AGP_PHYS_MEMORY 2
824 static gatt_mask intel_i810_masks[] =
826 {I810_PTE_VALID, 0},
827 {(I810_PTE_VALID | I810_PTE_LOCAL), AGP_DCACHE_MEMORY},
828 {I810_PTE_VALID, 0}
831 static struct _intel_i810_private {
832 struct pci_dev *i810_dev; /* device one */
833 volatile u8 *registers;
834 int num_dcache_entries;
835 } intel_i810_private;
837 static int intel_i810_fetch_size(void)
839 u32 smram_miscc;
840 aper_size_info_fixed *values;
842 pci_read_config_dword(agp_bridge.dev, I810_SMRAM_MISCC, &smram_miscc);
843 values = A_SIZE_FIX(agp_bridge.aperture_sizes);
845 if ((smram_miscc & I810_GMS) == I810_GMS_DISABLE) {
846 printk(KERN_WARNING PFX "i810 is disabled\n");
847 return 0;
849 if ((smram_miscc & I810_GFX_MEM_WIN_SIZE) == I810_GFX_MEM_WIN_32M) {
850 agp_bridge.previous_size =
851 agp_bridge.current_size = (void *) (values + 1);
852 agp_bridge.aperture_size_idx = 1;
853 return values[1].size;
854 } else {
855 agp_bridge.previous_size =
856 agp_bridge.current_size = (void *) (values);
857 agp_bridge.aperture_size_idx = 0;
858 return values[0].size;
861 return 0;
864 static int intel_i810_configure(void)
866 aper_size_info_fixed *current_size;
867 u32 temp;
868 int i;
870 current_size = A_SIZE_FIX(agp_bridge.current_size);
872 pci_read_config_dword(intel_i810_private.i810_dev, I810_MMADDR, &temp);
873 temp &= 0xfff80000;
875 intel_i810_private.registers =
876 (volatile u8 *) ioremap(temp, 128 * 4096);
878 if ((INREG32(intel_i810_private.registers, I810_DRAM_CTL)
879 & I810_DRAM_ROW_0) == I810_DRAM_ROW_0_SDRAM) {
880 /* This will need to be dynamically assigned */
881 printk(KERN_INFO PFX "detected 4MB dedicated video ram.\n");
882 intel_i810_private.num_dcache_entries = 1024;
884 pci_read_config_dword(intel_i810_private.i810_dev, I810_GMADDR, &temp);
885 agp_bridge.gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
886 OUTREG32(intel_i810_private.registers, I810_PGETBL_CTL,
887 agp_bridge.gatt_bus_addr | I810_PGETBL_ENABLED);
888 CACHE_FLUSH();
890 if (agp_bridge.needs_scratch_page == TRUE) {
891 for (i = 0; i < current_size->num_entries; i++) {
892 OUTREG32(intel_i810_private.registers,
893 I810_PTE_BASE + (i * 4),
894 agp_bridge.scratch_page);
897 return 0;
900 static void intel_i810_cleanup(void)
902 OUTREG32(intel_i810_private.registers, I810_PGETBL_CTL, 0);
903 iounmap((void *) intel_i810_private.registers);
906 static void intel_i810_tlbflush(agp_memory * mem)
908 return;
911 static void intel_i810_agp_enable(u32 mode)
913 return;
916 static int intel_i810_insert_entries(agp_memory * mem, off_t pg_start,
917 int type)
919 int i, j, num_entries;
920 void *temp;
922 temp = agp_bridge.current_size;
923 num_entries = A_SIZE_FIX(temp)->num_entries;
925 if ((pg_start + mem->page_count) > num_entries) {
926 return -EINVAL;
928 for (j = pg_start; j < (pg_start + mem->page_count); j++) {
929 if (!PGE_EMPTY(agp_bridge.gatt_table[j])) {
930 return -EBUSY;
934 if (type != 0 || mem->type != 0) {
935 if ((type == AGP_DCACHE_MEMORY) &&
936 (mem->type == AGP_DCACHE_MEMORY)) {
937 /* special insert */
938 CACHE_FLUSH();
939 for (i = pg_start;
940 i < (pg_start + mem->page_count); i++) {
941 OUTREG32(intel_i810_private.registers,
942 I810_PTE_BASE + (i * 4),
943 (i * 4096) | I810_PTE_LOCAL |
944 I810_PTE_VALID);
946 CACHE_FLUSH();
947 agp_bridge.tlb_flush(mem);
948 return 0;
950 if((type == AGP_PHYS_MEMORY) &&
951 (mem->type == AGP_PHYS_MEMORY)) {
952 goto insert;
954 return -EINVAL;
957 insert:
958 CACHE_FLUSH();
959 for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
960 OUTREG32(intel_i810_private.registers,
961 I810_PTE_BASE + (j * 4), mem->memory[i]);
963 CACHE_FLUSH();
965 agp_bridge.tlb_flush(mem);
966 return 0;
969 static int intel_i810_remove_entries(agp_memory * mem, off_t pg_start,
970 int type)
972 int i;
974 for (i = pg_start; i < (mem->page_count + pg_start); i++) {
975 OUTREG32(intel_i810_private.registers,
976 I810_PTE_BASE + (i * 4),
977 agp_bridge.scratch_page);
980 CACHE_FLUSH();
981 agp_bridge.tlb_flush(mem);
982 return 0;
985 static agp_memory *intel_i810_alloc_by_type(size_t pg_count, int type)
987 agp_memory *new;
989 if (type == AGP_DCACHE_MEMORY) {
990 if (pg_count != intel_i810_private.num_dcache_entries) {
991 return NULL;
993 new = agp_create_memory(1);
995 if (new == NULL) {
996 return NULL;
998 new->type = AGP_DCACHE_MEMORY;
999 new->page_count = pg_count;
1000 new->num_scratch_pages = 0;
1001 vfree(new->memory);
1002 MOD_INC_USE_COUNT;
1003 return new;
1005 if(type == AGP_PHYS_MEMORY) {
1006 /* The I810 requires a physical address to program
1007 * it's mouse pointer into hardware. However the
1008 * Xserver still writes to it through the agp
1009 * aperture
1011 if (pg_count != 1) {
1012 return NULL;
1014 new = agp_create_memory(1);
1016 if (new == NULL) {
1017 return NULL;
1019 MOD_INC_USE_COUNT;
1020 new->memory[0] = agp_alloc_page();
1022 if (new->memory[0] == 0) {
1023 /* Free this structure */
1024 agp_free_memory(new);
1025 return NULL;
1027 new->memory[0] =
1028 agp_bridge.mask_memory(
1029 virt_to_phys((void *) new->memory[0]),
1030 type);
1031 new->page_count = 1;
1032 new->num_scratch_pages = 1;
1033 new->type = AGP_PHYS_MEMORY;
1034 new->physical = virt_to_phys((void *) new->memory[0]);
1035 return new;
1038 return NULL;
1041 static void intel_i810_free_by_type(agp_memory * curr)
1043 agp_free_key(curr->key);
1044 if(curr->type == AGP_PHYS_MEMORY) {
1045 agp_destroy_page((unsigned long)
1046 phys_to_virt(curr->memory[0]));
1047 vfree(curr->memory);
1049 kfree(curr);
1050 MOD_DEC_USE_COUNT;
1053 static unsigned long intel_i810_mask_memory(unsigned long addr, int type)
1055 /* Type checking must be done elsewhere */
1056 return addr | agp_bridge.masks[type].mask;
1059 static int __init intel_i810_setup(struct pci_dev *i810_dev)
1061 intel_i810_private.i810_dev = i810_dev;
1063 agp_bridge.masks = intel_i810_masks;
1064 agp_bridge.num_of_masks = 2;
1065 agp_bridge.aperture_sizes = (void *) intel_i810_sizes;
1066 agp_bridge.size_type = FIXED_APER_SIZE;
1067 agp_bridge.num_aperture_sizes = 2;
1068 agp_bridge.dev_private_data = (void *) &intel_i810_private;
1069 agp_bridge.needs_scratch_page = TRUE;
1070 agp_bridge.configure = intel_i810_configure;
1071 agp_bridge.fetch_size = intel_i810_fetch_size;
1072 agp_bridge.cleanup = intel_i810_cleanup;
1073 agp_bridge.tlb_flush = intel_i810_tlbflush;
1074 agp_bridge.mask_memory = intel_i810_mask_memory;
1075 agp_bridge.agp_enable = intel_i810_agp_enable;
1076 agp_bridge.cache_flush = global_cache_flush;
1077 agp_bridge.create_gatt_table = agp_generic_create_gatt_table;
1078 agp_bridge.free_gatt_table = agp_generic_free_gatt_table;
1079 agp_bridge.insert_memory = intel_i810_insert_entries;
1080 agp_bridge.remove_memory = intel_i810_remove_entries;
1081 agp_bridge.alloc_by_type = intel_i810_alloc_by_type;
1082 agp_bridge.free_by_type = intel_i810_free_by_type;
1084 return 0;
1087 #endif /* CONFIG_AGP_I810 */
1089 #ifdef CONFIG_AGP_INTEL
1091 static int intel_fetch_size(void)
1093 int i;
1094 u16 temp;
1095 aper_size_info_16 *values;
1097 pci_read_config_word(agp_bridge.dev, INTEL_APSIZE, &temp);
1098 values = A_SIZE_16(agp_bridge.aperture_sizes);
1100 for (i = 0; i < agp_bridge.num_aperture_sizes; i++) {
1101 if (temp == values[i].size_value) {
1102 agp_bridge.previous_size =
1103 agp_bridge.current_size = (void *) (values + i);
1104 agp_bridge.aperture_size_idx = i;
1105 return values[i].size;
1109 return 0;
1112 static void intel_tlbflush(agp_memory * mem)
1114 pci_write_config_dword(agp_bridge.dev, INTEL_AGPCTRL, 0x2200);
1115 pci_write_config_dword(agp_bridge.dev, INTEL_AGPCTRL, 0x2280);
1118 static void intel_cleanup(void)
1120 u16 temp;
1121 aper_size_info_16 *previous_size;
1123 previous_size = A_SIZE_16(agp_bridge.previous_size);
1124 pci_read_config_word(agp_bridge.dev, INTEL_NBXCFG, &temp);
1125 pci_write_config_word(agp_bridge.dev, INTEL_NBXCFG, temp & ~(1 << 9));
1126 pci_write_config_word(agp_bridge.dev, INTEL_APSIZE,
1127 previous_size->size_value);
1130 static int intel_configure(void)
1132 u32 temp;
1133 u16 temp2;
1134 aper_size_info_16 *current_size;
1136 current_size = A_SIZE_16(agp_bridge.current_size);
1138 /* aperture size */
1139 pci_write_config_word(agp_bridge.dev, INTEL_APSIZE,
1140 current_size->size_value);
1142 /* address to map to */
1143 pci_read_config_dword(agp_bridge.dev, INTEL_APBASE, &temp);
1144 agp_bridge.gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
1146 /* attbase - aperture base */
1147 pci_write_config_dword(agp_bridge.dev, INTEL_ATTBASE,
1148 agp_bridge.gatt_bus_addr);
1150 /* agpctrl */
1151 pci_write_config_dword(agp_bridge.dev, INTEL_AGPCTRL, 0x2280);
1153 /* paccfg/nbxcfg */
1154 pci_read_config_word(agp_bridge.dev, INTEL_NBXCFG, &temp2);
1155 pci_write_config_word(agp_bridge.dev, INTEL_NBXCFG,
1156 (temp2 & ~(1 << 10)) | (1 << 9));
1157 /* clear any possible error conditions */
1158 pci_write_config_byte(agp_bridge.dev, INTEL_ERRSTS + 1, 7);
1159 return 0;
1162 static int intel_840_configure(void)
1164 u32 temp;
1165 u16 temp2;
1166 aper_size_info_16 *current_size;
1168 current_size = A_SIZE_16(agp_bridge.current_size);
1170 /* aperture size */
1171 pci_write_config_byte(agp_bridge.dev, INTEL_APSIZE,
1172 (char)current_size->size_value);
1174 /* address to map to */
1175 pci_read_config_dword(agp_bridge.dev, INTEL_APBASE, &temp);
1176 agp_bridge.gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
1178 /* attbase - aperture base */
1179 pci_write_config_dword(agp_bridge.dev, INTEL_ATTBASE,
1180 agp_bridge.gatt_bus_addr);
1182 /* agpctrl */
1183 pci_write_config_dword(agp_bridge.dev, INTEL_AGPCTRL, 0x0000);
1185 /* mcgcfg */
1186 pci_read_config_word(agp_bridge.dev, INTEL_I840_MCHCFG, &temp2);
1187 pci_write_config_word(agp_bridge.dev, INTEL_I840_MCHCFG,
1188 temp2 | (1 << 9));
1189 /* clear any possible error conditions */
1190 pci_write_config_word(agp_bridge.dev, INTEL_I840_ERRSTS, 0xc000);
1191 return 0;
1194 static unsigned long intel_mask_memory(unsigned long addr, int type)
1196 /* Memory type is ignored */
1198 return addr | agp_bridge.masks[0].mask;
1202 /* Setup function */
1203 static gatt_mask intel_generic_masks[] =
1205 {0x00000017, 0}
1208 static aper_size_info_16 intel_generic_sizes[7] =
1210 {256, 65536, 6, 0},
1211 {128, 32768, 5, 32},
1212 {64, 16384, 4, 48},
1213 {32, 8192, 3, 56},
1214 {16, 4096, 2, 60},
1215 {8, 2048, 1, 62},
1216 {4, 1024, 0, 63}
1219 static int __init intel_generic_setup (struct pci_dev *pdev)
1221 agp_bridge.masks = intel_generic_masks;
1222 agp_bridge.num_of_masks = 1;
1223 agp_bridge.aperture_sizes = (void *) intel_generic_sizes;
1224 agp_bridge.size_type = U16_APER_SIZE;
1225 agp_bridge.num_aperture_sizes = 7;
1226 agp_bridge.dev_private_data = NULL;
1227 agp_bridge.needs_scratch_page = FALSE;
1228 agp_bridge.configure = intel_configure;
1229 agp_bridge.fetch_size = intel_fetch_size;
1230 agp_bridge.cleanup = intel_cleanup;
1231 agp_bridge.tlb_flush = intel_tlbflush;
1232 agp_bridge.mask_memory = intel_mask_memory;
1233 agp_bridge.agp_enable = agp_generic_agp_enable;
1234 agp_bridge.cache_flush = global_cache_flush;
1235 agp_bridge.create_gatt_table = agp_generic_create_gatt_table;
1236 agp_bridge.free_gatt_table = agp_generic_free_gatt_table;
1237 agp_bridge.insert_memory = agp_generic_insert_memory;
1238 agp_bridge.remove_memory = agp_generic_remove_memory;
1239 agp_bridge.alloc_by_type = agp_generic_alloc_by_type;
1240 agp_bridge.free_by_type = agp_generic_free_by_type;
1242 return 0;
1244 (void) pdev; /* unused */
/* Register the Intel i840 callbacks; identical to the generic Intel setup
 * except for the chipset-specific configure routine. */
static int __init intel_840_setup (struct pci_dev *pdev)
{
	agp_bridge.masks = intel_generic_masks;
	agp_bridge.num_of_masks = 1;
	agp_bridge.aperture_sizes = (void *) intel_generic_sizes;
	agp_bridge.size_type = U16_APER_SIZE;
	agp_bridge.num_aperture_sizes = 7;
	agp_bridge.dev_private_data = NULL;
	agp_bridge.needs_scratch_page = FALSE;
	agp_bridge.configure = intel_840_configure;	/* i840-specific */
	agp_bridge.fetch_size = intel_fetch_size;
	agp_bridge.cleanup = intel_cleanup;
	agp_bridge.tlb_flush = intel_tlbflush;
	agp_bridge.mask_memory = intel_mask_memory;
	agp_bridge.agp_enable = agp_generic_agp_enable;
	agp_bridge.cache_flush = global_cache_flush;
	agp_bridge.create_gatt_table = agp_generic_create_gatt_table;
	agp_bridge.free_gatt_table = agp_generic_free_gatt_table;
	agp_bridge.insert_memory = agp_generic_insert_memory;
	agp_bridge.remove_memory = agp_generic_remove_memory;
	agp_bridge.alloc_by_type = agp_generic_alloc_by_type;
	agp_bridge.free_by_type = agp_generic_free_by_type;

	return 0;

	(void) pdev; /* unused */
}
1275 #endif /* CONFIG_AGP_INTEL */
1277 #ifdef CONFIG_AGP_VIA
/* Read the VIA aperture-size register and match it against the size table.
 * On a match, record current/previous size and return the size in MB;
 * returns 0 if the register value matches no known size. */
static int via_fetch_size(void)
{
	int i;
	u8 temp;
	aper_size_info_8 *values;

	values = A_SIZE_8(agp_bridge.aperture_sizes);
	pci_read_config_byte(agp_bridge.dev, VIA_APSIZE, &temp);
	for (i = 0; i < agp_bridge.num_aperture_sizes; i++) {
		if (temp == values[i].size_value) {
			agp_bridge.previous_size =
			    agp_bridge.current_size = (void *) (values + i);
			agp_bridge.aperture_size_idx = i;
			return values[i].size;
		}
	}

	return 0;
}
/* Program the VIA bridge: aperture size, GART base, GART control and the
 * GATT base (low 12 bits of ATTBASE carry enable/control flags). */
static int via_configure(void)
{
	u32 temp;
	aper_size_info_8 *current_size;

	current_size = A_SIZE_8(agp_bridge.current_size);
	/* aperture size */
	pci_write_config_byte(agp_bridge.dev, VIA_APSIZE,
			      current_size->size_value);
	/* address to map to */
	pci_read_config_dword(agp_bridge.dev, VIA_APBASE, &temp);
	agp_bridge.gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);

	/* GART control register */
	pci_write_config_dword(agp_bridge.dev, VIA_GARTCTRL, 0x0000000f);

	/* attbase - aperture GATT base */
	pci_write_config_dword(agp_bridge.dev, VIA_ATTBASE,
			       (agp_bridge.gatt_bus_addr & 0xfffff000) | 3);
	return 0;
}
/* Undo via_configure: clear the GATT base and restore the previous
 * aperture size value. */
static void via_cleanup(void)
{
	aper_size_info_8 *previous_size;

	previous_size = A_SIZE_8(agp_bridge.previous_size);
	pci_write_config_dword(agp_bridge.dev, VIA_ATTBASE, 0);
	pci_write_config_byte(agp_bridge.dev, VIA_APSIZE,
			      previous_size->size_value);
}
1331 static void via_tlbflush(agp_memory * mem)
1333 pci_write_config_dword(agp_bridge.dev, VIA_GARTCTRL, 0x0000008f);
1334 pci_write_config_dword(agp_bridge.dev, VIA_GARTCTRL, 0x0000000f);
1337 static unsigned long via_mask_memory(unsigned long addr, int type)
1339 /* Memory type is ignored */
1341 return addr | agp_bridge.masks[0].mask;
/* Aperture table: {size in MB, GATT entries, page-order, APSIZE register value}. */
static aper_size_info_8 via_generic_sizes[7] =
{
	{256, 65536, 6, 0},
	{128, 32768, 5, 128},
	{64, 16384, 4, 192},
	{32, 8192, 3, 224},
	{16, 4096, 2, 240},
	{8, 2048, 1, 248},
	{4, 1024, 0, 252}
};

/* VIA GATT entries carry no extra flag bits. */
static gatt_mask via_generic_masks[] =
{
	{0x00000000, 0}
};
/* Register the generic VIA callbacks on the global agp_bridge. */
static int __init via_generic_setup (struct pci_dev *pdev)
{
	/* Aperture description: 8-bit size register, 7 supported sizes. */
	agp_bridge.masks = via_generic_masks;
	agp_bridge.num_of_masks = 1;
	agp_bridge.aperture_sizes = (void *) via_generic_sizes;
	agp_bridge.size_type = U8_APER_SIZE;
	agp_bridge.num_aperture_sizes = 7;
	agp_bridge.dev_private_data = NULL;
	agp_bridge.needs_scratch_page = FALSE;
	/* Chipset-specific operations. */
	agp_bridge.configure = via_configure;
	agp_bridge.fetch_size = via_fetch_size;
	agp_bridge.cleanup = via_cleanup;
	agp_bridge.tlb_flush = via_tlbflush;
	agp_bridge.mask_memory = via_mask_memory;
	/* Generic helpers shared by most bridges. */
	agp_bridge.agp_enable = agp_generic_agp_enable;
	agp_bridge.cache_flush = global_cache_flush;
	agp_bridge.create_gatt_table = agp_generic_create_gatt_table;
	agp_bridge.free_gatt_table = agp_generic_free_gatt_table;
	agp_bridge.insert_memory = agp_generic_insert_memory;
	agp_bridge.remove_memory = agp_generic_remove_memory;
	agp_bridge.alloc_by_type = agp_generic_alloc_by_type;
	agp_bridge.free_by_type = agp_generic_free_by_type;

	return 0;

	(void) pdev; /* unused */
}
1388 #endif /* CONFIG_AGP_VIA */
1390 #ifdef CONFIG_AGP_SIS
/* Read the SiS aperture-size register and match it against the size table.
 * The low two bits of APSIZE are control bits, so a match ignoring them
 * is also accepted.  Returns size in MB, or 0 on no match. */
static int sis_fetch_size(void)
{
	u8 temp_size;
	int i;
	aper_size_info_8 *values;

	pci_read_config_byte(agp_bridge.dev, SIS_APSIZE, &temp_size);
	values = A_SIZE_8(agp_bridge.aperture_sizes);
	for (i = 0; i < agp_bridge.num_aperture_sizes; i++) {
		if ((temp_size == values[i].size_value) ||
		    ((temp_size & ~(0x03)) ==
		     (values[i].size_value & ~(0x03)))) {
			agp_bridge.previous_size =
			    agp_bridge.current_size = (void *) (values + i);

			agp_bridge.aperture_size_idx = i;
			return values[i].size;
		}
	}

	return 0;
}
1416 static void sis_tlbflush(agp_memory * mem)
1418 pci_write_config_byte(agp_bridge.dev, SIS_TLBFLUSH, 0x02);
/* Program the SiS bridge: TLB control, GART base from APBASE, GATT base,
 * and finally the aperture size. */
static int sis_configure(void)
{
	u32 temp;
	aper_size_info_8 *current_size;

	current_size = A_SIZE_8(agp_bridge.current_size);
	pci_write_config_byte(agp_bridge.dev, SIS_TLBCNTRL, 0x05);
	pci_read_config_dword(agp_bridge.dev, SIS_APBASE, &temp);
	agp_bridge.gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
	pci_write_config_dword(agp_bridge.dev, SIS_ATTBASE,
			       agp_bridge.gatt_bus_addr);
	pci_write_config_byte(agp_bridge.dev, SIS_APSIZE,
			      current_size->size_value);
	return 0;
}
/* Restore the previous aperture size; the low two control bits of
 * APSIZE are cleared to disable translation. */
static void sis_cleanup(void)
{
	aper_size_info_8 *previous_size;

	previous_size = A_SIZE_8(agp_bridge.previous_size);
	pci_write_config_byte(agp_bridge.dev, SIS_APSIZE,
			      (previous_size->size_value & ~(0x03)));
}
1446 static unsigned long sis_mask_memory(unsigned long addr, int type)
1448 /* Memory type is ignored */
1450 return addr | agp_bridge.masks[0].mask;
/* Aperture table: {size in MB, GATT entries, page-order, APSIZE register value}. */
static aper_size_info_8 sis_generic_sizes[7] =
{
	{256, 65536, 6, 99},
	{128, 32768, 5, 83},
	{64, 16384, 4, 67},
	{32, 8192, 3, 51},
	{16, 4096, 2, 35},
	{8, 2048, 1, 19},
	{4, 1024, 0, 3}
};

/* SiS GATT entries carry no extra flag bits. */
static gatt_mask sis_generic_masks[] =
{
	{0x00000000, 0}
};
/* Register the generic SiS callbacks on the global agp_bridge. */
static int __init sis_generic_setup (struct pci_dev *pdev)
{
	/* Aperture description: 8-bit size register, 7 supported sizes. */
	agp_bridge.masks = sis_generic_masks;
	agp_bridge.num_of_masks = 1;
	agp_bridge.aperture_sizes = (void *) sis_generic_sizes;
	agp_bridge.size_type = U8_APER_SIZE;
	agp_bridge.num_aperture_sizes = 7;
	agp_bridge.dev_private_data = NULL;
	agp_bridge.needs_scratch_page = FALSE;
	/* Chipset-specific operations. */
	agp_bridge.configure = sis_configure;
	agp_bridge.fetch_size = sis_fetch_size;
	agp_bridge.cleanup = sis_cleanup;
	agp_bridge.tlb_flush = sis_tlbflush;
	agp_bridge.mask_memory = sis_mask_memory;
	/* Generic helpers shared by most bridges. */
	agp_bridge.agp_enable = agp_generic_agp_enable;
	agp_bridge.cache_flush = global_cache_flush;
	agp_bridge.create_gatt_table = agp_generic_create_gatt_table;
	agp_bridge.free_gatt_table = agp_generic_free_gatt_table;
	agp_bridge.insert_memory = agp_generic_insert_memory;
	agp_bridge.remove_memory = agp_generic_remove_memory;
	agp_bridge.alloc_by_type = agp_generic_alloc_by_type;
	agp_bridge.free_by_type = agp_generic_free_by_type;

	return 0;
}
1495 #endif /* CONFIG_AGP_SIS */
1497 #ifdef CONFIG_AGP_AMD
/* One page of the two-level AMD Irongate GATT.
 * 'real' is the kernel virtual address of the backing page;
 * 'remapped' is an uncached (ioremap_nocache) mapping of the same page,
 * used for the actual table writes. */
typedef struct _amd_page_map {
	unsigned long *real;
	unsigned long *remapped;
} amd_page_map;

/* Per-device state for the Irongate driver. */
static struct _amd_irongate_private {
	volatile u8 *registers;		/* memory-mapped bridge registers */
	amd_page_map **gatt_pages;	/* second-level GATT pages */
	int num_tables;			/* number of entries in gatt_pages */
} amd_irongate_private;
/* Allocate one GATT page: get a free page, mark it reserved, remap it
 * uncached, and fill every entry with the scratch page.
 * Returns 0 on success or -ENOMEM (with all partial work undone). */
static int amd_create_page_map(amd_page_map *page_map)
{
	int i;

	page_map->real = (unsigned long *) __get_free_page(GFP_KERNEL);
	if (page_map->real == NULL) {
		return -ENOMEM;
	}
	/* Reserve the page so it is never swapped or reused. */
	set_bit(PG_reserved, &virt_to_page(page_map->real)->flags);
	CACHE_FLUSH();
	page_map->remapped = ioremap_nocache(virt_to_phys(page_map->real),
					     PAGE_SIZE);
	if (page_map->remapped == NULL) {
		clear_bit(PG_reserved,
			  &virt_to_page(page_map->real)->flags);
		free_page((unsigned long) page_map->real);
		page_map->real = NULL;
		return -ENOMEM;
	}
	CACHE_FLUSH();

	/* Point every entry at the scratch page until real memory is bound. */
	for(i = 0; i < PAGE_SIZE / sizeof(unsigned long); i++) {
		page_map->remapped[i] = agp_bridge.scratch_page;
	}

	return 0;
}
/* Release a GATT page created by amd_create_page_map: unmap the uncached
 * alias, un-reserve the page and free it. */
static void amd_free_page_map(amd_page_map *page_map)
{
	iounmap(page_map->remapped);
	clear_bit(PG_reserved,
		  &virt_to_page(page_map->real)->flags);
	free_page((unsigned long) page_map->real);
}
/* Free all second-level GATT pages and the pointer table itself.
 * Tolerates partially-constructed state (NULL entries / NULL 'real')
 * so it can be used on the error path of amd_create_gatt_pages. */
static void amd_free_gatt_pages(void)
{
	int i;
	amd_page_map **tables;
	amd_page_map *entry;

	tables = amd_irongate_private.gatt_pages;
	for(i = 0; i < amd_irongate_private.num_tables; i++) {
		entry = tables[i];
		if (entry != NULL) {
			if (entry->real != NULL) {
				amd_free_page_map(entry);
			}
			kfree(entry);
		}
	}
	kfree(tables);
}
/* Allocate nr_tables second-level GATT pages plus a NULL-terminated
 * pointer table.  On any failure the partially-built state is published
 * first so amd_free_gatt_pages() can tear it all down; returns 0 or
 * a negative errno. */
static int amd_create_gatt_pages(int nr_tables)
{
	amd_page_map **tables;
	amd_page_map *entry;
	int retval = 0;
	int i;

	/* +1 for a trailing NULL sentinel entry. */
	tables = kmalloc((nr_tables + 1) * sizeof(amd_page_map *),
			 GFP_KERNEL);
	if (tables == NULL) {
		return -ENOMEM;
	}
	memset(tables, 0, sizeof(amd_page_map *) * (nr_tables + 1));
	for (i = 0; i < nr_tables; i++) {
		entry = kmalloc(sizeof(amd_page_map), GFP_KERNEL);
		if (entry == NULL) {
			retval = -ENOMEM;
			break;
		}
		memset(entry, 0, sizeof(amd_page_map));
		tables[i] = entry;
		retval = amd_create_page_map(entry);
		if (retval != 0) break;
	}
	/* Publish before the error check so cleanup sees the new state. */
	amd_irongate_private.num_tables = nr_tables;
	amd_irongate_private.gatt_pages = tables;

	if (retval != 0) amd_free_gatt_pages();

	return retval;
}
/* Since we don't need contiguous memory we just try
 * to get the GATT table once
 */
/* Two-level GATT addressing helpers: bits 31..22 of an aperture address
 * index the page directory, bits 21..12 index within a GATT page. */
#define GET_PAGE_DIR_OFF(addr) (addr >> 22)
#define GET_PAGE_DIR_IDX(addr) (GET_PAGE_DIR_OFF(addr) - \
	GET_PAGE_DIR_OFF(agp_bridge.gart_bus_addr))
#define GET_GATT_OFF(addr) ((addr & 0x003ff000) >> 12)
/* Uncached mapping of the GATT page covering 'addr'. */
#define GET_GATT(addr) (amd_irongate_private.gatt_pages[\
	GET_PAGE_DIR_IDX(addr)]->remapped)
/* Build the two-level Irongate GATT: one page directory plus one GATT
 * page per 1024 aperture entries (each covering 4MB of aperture).
 * Directory entries get bit 0 set as the valid/present flag.
 * Returns 0 on success or a negative errno. */
static int amd_create_gatt_table(void)
{
	aper_size_info_lvl2 *value;
	amd_page_map page_dir;
	unsigned long addr;
	int retval;
	u32 temp;
	int i;

	value = A_SIZE_LVL2(agp_bridge.current_size);
	retval = amd_create_page_map(&page_dir);
	if (retval != 0) {
		return retval;
	}

	retval = amd_create_gatt_pages(value->num_entries / 1024);
	if (retval != 0) {
		amd_free_page_map(&page_dir);
		return retval;
	}

	agp_bridge.gatt_table_real = page_dir.real;
	agp_bridge.gatt_table = page_dir.remapped;
	agp_bridge.gatt_bus_addr = virt_to_bus(page_dir.real);

	/* Get the address for the gart region.
	 * This is a bus address even on the alpha, b/c its
	 * used to program the agp master not the cpu
	 */
	pci_read_config_dword(agp_bridge.dev, AMD_APBASE, &temp);
	addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
	agp_bridge.gart_bus_addr = addr;

	/* Calculate the agp offset; one directory entry per 4MB (0x00400000). */
	for(i = 0; i < value->num_entries / 1024; i++, addr += 0x00400000) {
		page_dir.remapped[GET_PAGE_DIR_OFF(addr)] =
		    virt_to_bus(amd_irongate_private.gatt_pages[i]->real);
		page_dir.remapped[GET_PAGE_DIR_OFF(addr)] |= 0x00000001;
	}

	return 0;
}
/* Tear down the two-level GATT: free the second-level pages, then the
 * page directory that agp_bridge still points at. */
static int amd_free_gatt_table(void)
{
	amd_page_map page_dir;

	page_dir.real = agp_bridge.gatt_table_real;
	page_dir.remapped = agp_bridge.gatt_table;

	amd_free_gatt_pages();
	amd_free_page_map(&page_dir);
	return 0;
}
/* Read the Irongate aperture-size field (bits 3..1 of APSIZE) and match
 * it against the size table.  Returns size in MB, or 0 on no match. */
static int amd_irongate_fetch_size(void)
{
	int i;
	u32 temp;
	aper_size_info_lvl2 *values;

	pci_read_config_dword(agp_bridge.dev, AMD_APSIZE, &temp);
	temp = (temp & 0x0000000e);
	values = A_SIZE_LVL2(agp_bridge.aperture_sizes);
	for (i = 0; i < agp_bridge.num_aperture_sizes; i++) {
		if (temp == values[i].size_value) {
			agp_bridge.previous_size =
			    agp_bridge.current_size = (void *) (values + i);

			agp_bridge.aperture_size_idx = i;
			return values[i].size;
		}
	}

	return 0;
}
/* Program the Irongate bridge via its memory-mapped register window:
 * map the registers, set GATT base, sync and indexing modes, set the
 * GART enable bit, write aperture size (+enable bit 0), flush the TLB. */
static int amd_irongate_configure(void)
{
	aper_size_info_lvl2 *current_size;
	u32 temp;
	u16 enable_reg;

	current_size = A_SIZE_LVL2(agp_bridge.current_size);

	/* Get the memory mapped registers */
	pci_read_config_dword(agp_bridge.dev, AMD_MMBASE, &temp);
	temp = (temp & PCI_BASE_ADDRESS_MEM_MASK);
	amd_irongate_private.registers = (volatile u8 *) ioremap(temp, 4096);

	/* Write out the address of the gatt table */
	OUTREG32(amd_irongate_private.registers, AMD_ATTBASE,
		 agp_bridge.gatt_bus_addr);

	/* Write the Sync register */
	pci_write_config_byte(agp_bridge.dev, AMD_MODECNTL, 0x80);

	/* Set indexing mode */
	pci_write_config_byte(agp_bridge.dev, AMD_MODECNTL2, 0x00);

	/* Write the enable register */
	enable_reg = INREG16(amd_irongate_private.registers, AMD_GARTENABLE);
	enable_reg = (enable_reg | 0x0004);
	OUTREG16(amd_irongate_private.registers, AMD_GARTENABLE, enable_reg);

	/* Write out the size register */
	pci_read_config_dword(agp_bridge.dev, AMD_APSIZE, &temp);
	temp = (((temp & ~(0x0000000e)) | current_size->size_value)
		| 0x00000001);
	pci_write_config_dword(agp_bridge.dev, AMD_APSIZE, temp);

	/* Flush the tlb */
	OUTREG32(amd_irongate_private.registers, AMD_TLBFLUSH, 0x00000001);

	return 0;
}
/* Undo amd_irongate_configure: clear the GART enable bit, restore the
 * previous aperture size (also clearing enable bit 0), and unmap the
 * register window. */
static void amd_irongate_cleanup(void)
{
	aper_size_info_lvl2 *previous_size;
	u32 temp;
	u16 enable_reg;

	previous_size = A_SIZE_LVL2(agp_bridge.previous_size);

	enable_reg = INREG16(amd_irongate_private.registers, AMD_GARTENABLE);
	enable_reg = (enable_reg & ~(0x0004));
	OUTREG16(amd_irongate_private.registers, AMD_GARTENABLE, enable_reg);

	/* Write back the previous size and disable gart translation */
	pci_read_config_dword(agp_bridge.dev, AMD_APSIZE, &temp);
	temp = ((temp & ~(0x0000000f)) | previous_size->size_value);
	pci_write_config_dword(agp_bridge.dev, AMD_APSIZE, temp);
	iounmap((void *) amd_irongate_private.registers);
}
/*
 * This routine could be implemented by taking the addresses
 * written to the GATT, and flushing them individually. However
 * currently it just flushes the whole table. Which is probably
 * more efficient, since agp_memory blocks can be a large number of
 * entries.
 */
1753 static void amd_irongate_tlbflush(agp_memory * temp)
1755 OUTREG32(amd_irongate_private.registers, AMD_TLBFLUSH, 0x00000001);
1758 static unsigned long amd_irongate_mask_memory(unsigned long addr, int type)
1760 /* Only type 0 is supported by the irongate */
1762 return addr | agp_bridge.masks[0].mask;
/* Bind an agp_memory block into the two-level GATT at pg_start.
 * Validates type and bounds, refuses to overwrite occupied entries
 * (-EBUSY), flushes CPU caches once if needed, writes the pre-masked
 * page addresses, then flushes the GART TLB.  Returns 0 or -errno. */
static int amd_insert_memory(agp_memory * mem,
			     off_t pg_start, int type)
{
	int i, j, num_entries;
	unsigned long *cur_gatt;
	unsigned long addr;

	num_entries = A_SIZE_LVL2(agp_bridge.current_size)->num_entries;

	/* Only type 0 memory is supported. */
	if (type != 0 || mem->type != 0) {
		return -EINVAL;
	}
	if ((pg_start + mem->page_count) > num_entries) {
		return -EINVAL;
	}

	/* First pass: verify the target range is entirely free. */
	j = pg_start;
	while (j < (pg_start + mem->page_count)) {
		addr = (j * PAGE_SIZE) + agp_bridge.gart_bus_addr;
		cur_gatt = GET_GATT(addr);
		if (!PGE_EMPTY(cur_gatt[GET_GATT_OFF(addr)])) {
			return -EBUSY;
		}
		j++;
	}

	if (mem->is_flushed == FALSE) {
		CACHE_FLUSH();
		mem->is_flushed = TRUE;
	}

	/* Second pass: write the entries. */
	for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
		addr = (j * PAGE_SIZE) + agp_bridge.gart_bus_addr;
		cur_gatt = GET_GATT(addr);
		cur_gatt[GET_GATT_OFF(addr)] = mem->memory[i];
	}
	agp_bridge.tlb_flush(mem);
	return 0;
}
/* Unbind an agp_memory block: point its GATT entries back at the scratch
 * page and flush the GART TLB.  Returns 0 or -EINVAL for bad type. */
static int amd_remove_memory(agp_memory * mem, off_t pg_start,
			     int type)
{
	int i;
	unsigned long *cur_gatt;
	unsigned long addr;

	if (type != 0 || mem->type != 0) {
		return -EINVAL;
	}
	for (i = pg_start; i < (mem->page_count + pg_start); i++) {
		addr = (i * PAGE_SIZE) + agp_bridge.gart_bus_addr;
		cur_gatt = GET_GATT(addr);
		cur_gatt[GET_GATT_OFF(addr)] =
		    (unsigned long) agp_bridge.scratch_page;
	}

	agp_bridge.tlb_flush(mem);
	return 0;
}
/* Aperture table: {size in MB, GATT entries, APSIZE register value}. */
static aper_size_info_lvl2 amd_irongate_sizes[7] =
{
	{2048, 524288, 0x0000000c},
	{1024, 262144, 0x0000000a},
	{512, 131072, 0x00000008},
	{256, 65536, 0x00000006},
	{128, 32768, 0x00000004},
	{64, 16384, 0x00000002},
	{32, 8192, 0x00000000}
};

/* Irongate GATT entries carry bit 0 as the valid flag. */
static gatt_mask amd_irongate_masks[] =
{
	{0x00000001, 0}
};
/* Register the AMD Irongate callbacks on the global agp_bridge.  Uses
 * chipset-specific GATT management (two-level table) instead of the
 * generic create/free/insert/remove helpers. */
static int __init amd_irongate_setup (struct pci_dev *pdev)
{
	agp_bridge.masks = amd_irongate_masks;
	agp_bridge.num_of_masks = 1;
	agp_bridge.aperture_sizes = (void *) amd_irongate_sizes;
	agp_bridge.size_type = LVL2_APER_SIZE;
	agp_bridge.num_aperture_sizes = 7;
	agp_bridge.dev_private_data = (void *) &amd_irongate_private;
	agp_bridge.needs_scratch_page = FALSE;
	agp_bridge.configure = amd_irongate_configure;
	agp_bridge.fetch_size = amd_irongate_fetch_size;
	agp_bridge.cleanup = amd_irongate_cleanup;
	agp_bridge.tlb_flush = amd_irongate_tlbflush;
	agp_bridge.mask_memory = amd_irongate_mask_memory;
	agp_bridge.agp_enable = agp_generic_agp_enable;
	agp_bridge.cache_flush = global_cache_flush;
	agp_bridge.create_gatt_table = amd_create_gatt_table;
	agp_bridge.free_gatt_table = amd_free_gatt_table;
	agp_bridge.insert_memory = amd_insert_memory;
	agp_bridge.remove_memory = amd_remove_memory;
	agp_bridge.alloc_by_type = agp_generic_alloc_by_type;
	agp_bridge.free_by_type = agp_generic_free_by_type;

	return 0;

	(void) pdev; /* unused */
}
1870 #endif /* CONFIG_AGP_AMD */
1872 #ifdef CONFIG_AGP_ALI
/* Read the ALi aperture-size field (low bits of ATTBASE) and match it
 * against the size table.  Returns size in MB, or 0 on no match. */
static int ali_fetch_size(void)
{
	int i;
	u32 temp;
	aper_size_info_32 *values;

	pci_read_config_dword(agp_bridge.dev, ALI_ATTBASE, &temp);
	temp &= ~(0xfffffff0);	/* keep only the size bits */
	values = A_SIZE_32(agp_bridge.aperture_sizes);

	for (i = 0; i < agp_bridge.num_aperture_sizes; i++) {
		if (temp == values[i].size_value) {
			agp_bridge.previous_size =
			    agp_bridge.current_size = (void *) (values + i);
			agp_bridge.aperture_size_idx = i;
			return values[i].size;
		}
	}

	return 0;
}
/* Flush the ALi GART TLB by pulsing the flush command (0x90) in the TLB
 * control register, then restoring the normal value (0x10); the upper
 * bits of the register are preserved. */
static void ali_tlbflush(agp_memory * mem)
{
	u32 temp;

	pci_read_config_dword(agp_bridge.dev, ALI_TLBCTRL, &temp);
	pci_write_config_dword(agp_bridge.dev, ALI_TLBCTRL,
			       ((temp & 0xffffff00) | 0x00000090));
	pci_write_config_dword(agp_bridge.dev, ALI_TLBCTRL,
			       ((temp & 0xffffff00) | 0x00000010));
}
/* Undo ali_configure: issue a final TLB flush command and restore the
 * previous aperture-size value in ATTBASE. */
static void ali_cleanup(void)
{
	aper_size_info_32 *previous_size;
	u32 temp;

	previous_size = A_SIZE_32(agp_bridge.previous_size);

	pci_read_config_dword(agp_bridge.dev, ALI_TLBCTRL, &temp);
	pci_write_config_dword(agp_bridge.dev, ALI_TLBCTRL,
			       ((temp & 0xffffff00) | 0x00000090));
	pci_write_config_dword(agp_bridge.dev, ALI_ATTBASE,
			       previous_size->size_value);
}
/* Program the ALi bridge: ATTBASE carries both the GATT base address and
 * the aperture-size bits in one register; then enable TLB control and
 * record the aperture bus address from APBASE. */
static int ali_configure(void)
{
	u32 temp;
	aper_size_info_32 *current_size;

	current_size = A_SIZE_32(agp_bridge.current_size);

	/* aperture size and gatt addr */
	pci_write_config_dword(agp_bridge.dev, ALI_ATTBASE,
		  agp_bridge.gatt_bus_addr | current_size->size_value);

	/* tlb control */
	pci_read_config_dword(agp_bridge.dev, ALI_TLBCTRL, &temp);
	pci_write_config_dword(agp_bridge.dev, ALI_TLBCTRL,
			       ((temp & 0xffffff00) | 0x00000010));

	/* address to map to */
	pci_read_config_dword(agp_bridge.dev, ALI_APBASE, &temp);
	agp_bridge.gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
	return 0;
}
1943 static unsigned long ali_mask_memory(unsigned long addr, int type)
1945 /* Memory type is ignored */
1947 return addr | agp_bridge.masks[0].mask;
/* Setup function */

/* ALi GATT entries carry no extra flag bits. */
static gatt_mask ali_generic_masks[] =
{
	{0x00000000, 0}
};

/* Aperture table: {size in MB, GATT entries, page-order, ATTBASE size bits}. */
static aper_size_info_32 ali_generic_sizes[7] =
{
	{256, 65536, 6, 10},
	{128, 32768, 5, 9},
	{64, 16384, 4, 8},
	{32, 8192, 3, 7},
	{16, 4096, 2, 6},
	{8, 2048, 1, 4},
	{4, 1024, 0, 3}
};
/* Register the generic ALi callbacks on the global agp_bridge. */
static int __init ali_generic_setup (struct pci_dev *pdev)
{
	/* Aperture description: 32-bit size register, 7 supported sizes. */
	agp_bridge.masks = ali_generic_masks;
	agp_bridge.num_of_masks = 1;
	agp_bridge.aperture_sizes = (void *) ali_generic_sizes;
	agp_bridge.size_type = U32_APER_SIZE;
	agp_bridge.num_aperture_sizes = 7;
	agp_bridge.dev_private_data = NULL;
	agp_bridge.needs_scratch_page = FALSE;
	/* Chipset-specific operations. */
	agp_bridge.configure = ali_configure;
	agp_bridge.fetch_size = ali_fetch_size;
	agp_bridge.cleanup = ali_cleanup;
	agp_bridge.tlb_flush = ali_tlbflush;
	agp_bridge.mask_memory = ali_mask_memory;
	/* Generic helpers shared by most bridges. */
	agp_bridge.agp_enable = agp_generic_agp_enable;
	agp_bridge.cache_flush = global_cache_flush;
	agp_bridge.create_gatt_table = agp_generic_create_gatt_table;
	agp_bridge.free_gatt_table = agp_generic_free_gatt_table;
	agp_bridge.insert_memory = agp_generic_insert_memory;
	agp_bridge.remove_memory = agp_generic_remove_memory;
	agp_bridge.alloc_by_type = agp_generic_alloc_by_type;
	agp_bridge.free_by_type = agp_generic_free_by_type;

	return 0;

	(void) pdev; /* unused */
}
1996 #endif /* CONFIG_AGP_ALI */
1999 /* per-chipset initialization data.
2000 * note -- all chipsets for a single vendor MUST be grouped together
static struct {
	unsigned short device_id;	/* first, to make table easier to read */
	unsigned short vendor_id;
	enum chipset_type chipset;
	const char *vendor_name;
	const char *chipset_name;
	int (*chipset_setup) (struct pci_dev *pdev);
} agp_bridge_info[] __initdata = {

#ifdef CONFIG_AGP_ALI
	{ PCI_DEVICE_ID_AL_M1541_0, PCI_VENDOR_ID_AL, ALI_M1541,
	  "Ali", "M1541", ali_generic_setup },
	/* device_id 0 = vendor-generic entry, used only with
	 * agp_try_unsupported (see agp_lookup_host_bridge). */
	{ 0, PCI_VENDOR_ID_AL, ALI_GENERIC,
	  "Ali", "Generic", ali_generic_setup },
#endif /* CONFIG_AGP_ALI */

#ifdef CONFIG_AGP_AMD
	{ PCI_DEVICE_ID_AMD_IRONGATE_0, PCI_VENDOR_ID_AMD, AMD_IRONGATE,
	  "AMD", "Irongate", amd_irongate_setup },
	{ 0, PCI_VENDOR_ID_AMD, AMD_GENERIC,
	  "AMD", "Generic", amd_irongate_setup },
#endif /* CONFIG_AGP_AMD */

#ifdef CONFIG_AGP_INTEL
	{ PCI_DEVICE_ID_INTEL_82443LX_0, PCI_VENDOR_ID_INTEL, INTEL_LX,
	  "Intel", "440LX", intel_generic_setup },
	{ PCI_DEVICE_ID_INTEL_82443BX_0, PCI_VENDOR_ID_INTEL, INTEL_BX,
	  "Intel", "440BX", intel_generic_setup },
	{ PCI_DEVICE_ID_INTEL_82443GX_0, PCI_VENDOR_ID_INTEL, INTEL_GX,
	  "Intel", "440GX", intel_generic_setup },
	{ PCI_DEVICE_ID_INTEL_840_0, PCI_VENDOR_ID_INTEL, INTEL_I840,
	  "Intel", "i840", intel_840_setup },
	{ 0, PCI_VENDOR_ID_INTEL, INTEL_GENERIC,
	  "Intel", "Generic", intel_generic_setup },
#endif /* CONFIG_AGP_INTEL */

#ifdef CONFIG_AGP_SIS
	{ PCI_DEVICE_ID_SI_630, PCI_VENDOR_ID_SI, SIS_GENERIC,
	  "SiS", "630", sis_generic_setup },
	{ PCI_DEVICE_ID_SI_540, PCI_VENDOR_ID_SI, SIS_GENERIC,
	  "SiS", "540", sis_generic_setup },
	{ PCI_DEVICE_ID_SI_620, PCI_VENDOR_ID_SI, SIS_GENERIC,
	  "SiS", "620", sis_generic_setup },
	{ PCI_DEVICE_ID_SI_530, PCI_VENDOR_ID_SI, SIS_GENERIC,
	  "SiS", "530", sis_generic_setup },
	/* NOTE(review): the four "Generic" entries below duplicate the
	 * device IDs above; since agp_lookup_host_bridge returns on the
	 * first device_id match, these appear to be unreachable — verify
	 * before removing. */
	{ PCI_DEVICE_ID_SI_630, PCI_VENDOR_ID_SI, SIS_GENERIC,
	  "SiS", "Generic", sis_generic_setup },
	{ PCI_DEVICE_ID_SI_540, PCI_VENDOR_ID_SI, SIS_GENERIC,
	  "SiS", "Generic", sis_generic_setup },
	{ PCI_DEVICE_ID_SI_620, PCI_VENDOR_ID_SI, SIS_GENERIC,
	  "SiS", "Generic", sis_generic_setup },
	{ PCI_DEVICE_ID_SI_530, PCI_VENDOR_ID_SI, SIS_GENERIC,
	  "SiS", "Generic", sis_generic_setup },
	{ 0, PCI_VENDOR_ID_SI, SIS_GENERIC,
	  "SiS", "Generic", sis_generic_setup },
#endif /* CONFIG_AGP_SIS */

#ifdef CONFIG_AGP_VIA
	{ PCI_DEVICE_ID_VIA_8501_0, PCI_VENDOR_ID_VIA, VIA_MVP4,
	  "Via", "MVP4", via_generic_setup },
	{ PCI_DEVICE_ID_VIA_82C597_0, PCI_VENDOR_ID_VIA, VIA_VP3,
	  "Via", "VP3", via_generic_setup },
	{ PCI_DEVICE_ID_VIA_82C598_0, PCI_VENDOR_ID_VIA, VIA_MVP3,
	  "Via", "MVP3", via_generic_setup },
	{ PCI_DEVICE_ID_VIA_82C691_0, PCI_VENDOR_ID_VIA, VIA_APOLLO_PRO,
	  "Via", "Apollo Pro", via_generic_setup },
	{ PCI_DEVICE_ID_VIA_8371_0, PCI_VENDOR_ID_VIA, VIA_APOLLO_KX133,
	  "Via", "Apollo Pro KX133", via_generic_setup },
	{ PCI_DEVICE_ID_VIA_8363_0, PCI_VENDOR_ID_VIA, VIA_APOLLO_KT133,
	  "Via", "Apollo Pro KT133", via_generic_setup },
	{ 0, PCI_VENDOR_ID_VIA, VIA_GENERIC,
	  "Via", "Generic", via_generic_setup },
#endif /* CONFIG_AGP_VIA */

	{ 0, }, /* dummy final entry, always present */
};
2180 /* scan table above for supported devices */
/* Find pdev in agp_bridge_info and run its setup routine.
 * Scans to the first entry for pdev's vendor, then walks that vendor's
 * group looking for an exact device match.  If none matches and the user
 * passed agp_try_unsupported=1, falls back to the vendor's generic entry
 * (device_id == 0, always last in each vendor group).
 * Returns the setup routine's result, or -ENODEV. */
static int __init agp_lookup_host_bridge (struct pci_dev *pdev)
{
	int i;

	for (i = 0; i < arraysize (agp_bridge_info); i++)
		if (pdev->vendor == agp_bridge_info[i].vendor_id)
			break;

	if (i >= arraysize (agp_bridge_info)) {
		printk (KERN_DEBUG PFX "unsupported bridge\n");
		return -ENODEV;
	}

	while ((i < arraysize (agp_bridge_info)) &&
	       (agp_bridge_info[i].vendor_id == pdev->vendor)) {
		if (pdev->device == agp_bridge_info[i].device_id) {
			printk (KERN_INFO PFX "Detected %s %s chipset\n",
				agp_bridge_info[i].vendor_name,
				agp_bridge_info[i].chipset_name);
			agp_bridge.type = agp_bridge_info[i].chipset;
			return agp_bridge_info[i].chipset_setup (pdev);
		}
		i++;
	}

	i--; /* point to vendor generic entry (device_id == 0) */

	/* try init anyway, if user requests it AND
	 * there is a 'generic' bridge entry for this vendor */
	if (agp_try_unsupported && agp_bridge_info[i].device_id == 0) {
		printk(KERN_WARNING PFX "Trying generic %s routines"
		       " for device id: %04x\n",
		       agp_bridge_info[i].vendor_name, pdev->device);
		agp_bridge.type = agp_bridge_info[i].chipset;
		return agp_bridge_info[i].chipset_setup (pdev);
	}

	printk(KERN_ERR PFX "Unsupported %s chipset (device id: %04x),"
	       " you might want to try agp_try_unsupported=1.\n",
	       agp_bridge_info[i].vendor_name, pdev->device);
	return -ENODEV;
}
2226 /* Supported Device Scanning routine */
/* Locate the host bridge, handle the special i810/i815 integrated-graphics
 * cases, then walk the PCI capability list for the AGP capability (ID 0x02)
 * and dispatch to the chipset table.  Returns 0 or a negative errno. */
static int __init agp_find_supported_device(void)
{
	struct pci_dev *dev = NULL;
	u8 cap_ptr = 0x00;
	u32 cap_id, scratch;

	if ((dev = pci_find_class(PCI_CLASS_BRIDGE_HOST << 8, NULL)) == NULL)
		return -ENODEV;

	agp_bridge.dev = dev;

	/* Need to test for I810 here */
#ifdef CONFIG_AGP_I810
	if (dev->vendor == PCI_VENDOR_ID_INTEL) {
		struct pci_dev *i810_dev;

		/* The i810 family puts the graphics device behind a
		 * secondary PCI function; setup targets that device. */
		switch (dev->device) {
		case PCI_DEVICE_ID_INTEL_810_0:
			i810_dev = pci_find_device(PCI_VENDOR_ID_INTEL,
					       PCI_DEVICE_ID_INTEL_810_1,
						   NULL);
			if (i810_dev == NULL) {
				printk(KERN_ERR PFX "Detected an Intel i810,"
				       " but could not find the secondary"
				       " device.\n");
				return -ENODEV;
			}
			printk(KERN_INFO PFX "Detected an Intel "
			       "i810 Chipset.\n");
			agp_bridge.type = INTEL_I810;
			return intel_i810_setup (i810_dev);

		case PCI_DEVICE_ID_INTEL_810_DC100_0:
			i810_dev = pci_find_device(PCI_VENDOR_ID_INTEL,
					 PCI_DEVICE_ID_INTEL_810_DC100_1,
						   NULL);
			if (i810_dev == NULL) {
				printk(KERN_ERR PFX "Detected an Intel i810 "
				       "DC100, but could not find the "
				       "secondary device.\n");
				return -ENODEV;
			}
			printk(KERN_INFO PFX "Detected an Intel i810 "
			       "DC100 Chipset.\n");
			agp_bridge.type = INTEL_I810;
			return intel_i810_setup(i810_dev);

		case PCI_DEVICE_ID_INTEL_810_E_0:
			i810_dev = pci_find_device(PCI_VENDOR_ID_INTEL,
					     PCI_DEVICE_ID_INTEL_810_E_1,
						   NULL);
			if (i810_dev == NULL) {
				printk(KERN_ERR PFX "Detected an Intel i810 E"
				       ", but could not find the secondary "
				       "device.\n");
				return -ENODEV;
			}
			printk(KERN_INFO PFX "Detected an Intel i810 E "
			       "Chipset.\n");
			agp_bridge.type = INTEL_I810;
			return intel_i810_setup(i810_dev);

		case PCI_DEVICE_ID_INTEL_815_0:
			/* The i815 can operate either as an i810 style
			 * integrated device, or as an AGP4X motherboard.
			 *
			 * This only addresses the first mode:
			 */
			i810_dev = pci_find_device(PCI_VENDOR_ID_INTEL,
						   PCI_DEVICE_ID_INTEL_815_1,
						   NULL);
			if (i810_dev == NULL) {
				printk(KERN_ERR PFX "agpgart: Detected an "
				       "Intel i815, but could not find the"
				       " secondary device.\n");
				agp_bridge.type = NOT_SUPPORTED;
				return -ENODEV;
			}
			printk(KERN_INFO PFX "agpgart: Detected an Intel i815 "
			       "Chipset.\n");
			agp_bridge.type = INTEL_I810;
			return intel_i810_setup(i810_dev);

		default:
			break;
		}
	}
#endif /* CONFIG_AGP_I810 */

	/* find capndx: PCI status bit 20 = capabilities list present */
	pci_read_config_dword(dev, 0x04, &scratch);
	if (!(scratch & 0x00100000))
		return -ENODEV;

	/* Walk the capability list (head at config offset 0x34) looking
	 * for the AGP capability ID (0x02). */
	pci_read_config_byte(dev, 0x34, &cap_ptr);
	if (cap_ptr != 0x00) {
		do {
			pci_read_config_dword(dev, cap_ptr, &cap_id);

			if ((cap_id & 0xff) != 0x02)
				cap_ptr = (cap_id >> 8) & 0xff;
		}
		while (((cap_id & 0xff) != 0x02) && (cap_ptr != 0x00));
	}
	if (cap_ptr == 0x00)
		return -ENODEV;
	agp_bridge.capndx = cap_ptr;

	/* Fill in the mode register */
	pci_read_config_dword(agp_bridge.dev,
			      agp_bridge.capndx + 4,
			      &agp_bridge.mode);

	/* probe for known chipsets */
	return agp_lookup_host_bridge (dev);
}
/* Interpolation table mapping installed system memory (MB) to the
 * maximum memory (MB) the AGP driver should consume. */
struct agp_max_table {
	int mem;	/* system memory in MB */
	int agp;	/* max AGP memory in MB */
};

static struct agp_max_table maxes_table[9] __initdata =
{
	{0, 0},
	{32, 4},
	{64, 28},
	{128, 96},
	{256, 204},
	{512, 440},
	{1024, 942},
	{2048, 1920},
	{4096, 3932}
};
/* Compute the maximum amount of memory to dedicate to AGP, by linear
 * interpolation between the bracketing rows of maxes_table based on the
 * installed physical memory.  Returns the limit in pages. */
static int __init agp_find_max (void)
{
	long memory, index, result;

	memory = virt_to_phys(high_memory) >> 20;	/* system RAM in MB */
	index = 1;

	while ((memory > maxes_table[index].mem) &&
	       (index < 8)) {
		index++;
	}

	result = maxes_table[index - 1].agp +
	   ( (memory - maxes_table[index - 1].mem)  *
	     (maxes_table[index].agp - maxes_table[index - 1].agp)) /
	   (maxes_table[index].mem - maxes_table[index - 1].mem);

	printk(KERN_INFO PFX "Maximum main memory to use "
	       "for agp memory: %ldM\n", result);
	result = result << (20 - PAGE_SHIFT);	/* MB -> pages */
	return result;
}
/* Driver version reported to userspace via the frontend. */
#define AGPGART_VERSION_MAJOR 0
#define AGPGART_VERSION_MINOR 99

static agp_version agp_current_version =
{
	AGPGART_VERSION_MAJOR,
	AGPGART_VERSION_MINOR
};
2395 static int __init agp_backend_initialize(void)
2397 int size_value, rc, got_gatt=0, got_keylist=0;
2399 memset(&agp_bridge, 0, sizeof(struct agp_bridge_data));
2400 agp_bridge.type = NOT_SUPPORTED;
2401 agp_bridge.max_memory_agp = agp_find_max();
2402 agp_bridge.version = &agp_current_version;
2404 rc = agp_find_supported_device();
2405 if (rc) {
2406 /* not KERN_ERR because error msg should have already printed */
2407 printk(KERN_DEBUG PFX "no supported devices found.\n");
2408 return rc;
2411 if (agp_bridge.needs_scratch_page == TRUE) {
2412 agp_bridge.scratch_page = agp_alloc_page();
2414 if (agp_bridge.scratch_page == 0) {
2415 printk(KERN_ERR PFX "unable to get memory for "
2416 "scratch page.\n");
2417 return -ENOMEM;
2419 agp_bridge.scratch_page =
2420 virt_to_phys((void *) agp_bridge.scratch_page);
2421 agp_bridge.scratch_page =
2422 agp_bridge.mask_memory(agp_bridge.scratch_page, 0);
2425 size_value = agp_bridge.fetch_size();
2427 if (size_value == 0) {
2428 printk(KERN_ERR PFX "unable to detrimine aperture size.\n");
2429 rc = -EINVAL;
2430 goto err_out;
2432 if (agp_bridge.create_gatt_table()) {
2433 printk(KERN_ERR PFX "unable to get memory for graphics "
2434 "translation table.\n");
2435 rc = -ENOMEM;
2436 goto err_out;
2438 got_gatt = 1;
2440 agp_bridge.key_list = vmalloc(PAGE_SIZE * 4);
2441 if (agp_bridge.key_list == NULL) {
2442 printk(KERN_ERR PFX "error allocating memory for key lists.\n");
2443 rc = -ENOMEM;
2444 goto err_out;
2446 got_keylist = 1;
2448 /* FIXME vmalloc'd memory not guaranteed contiguous */
2449 memset(agp_bridge.key_list, 0, PAGE_SIZE * 4);
2451 if (agp_bridge.configure()) {
2452 printk(KERN_ERR PFX "error configuring host chipset.\n");
2453 rc = -EINVAL;
2454 goto err_out;
2457 printk(KERN_INFO PFX "AGP aperture is %dM @ 0x%lx\n",
2458 size_value, agp_bridge.gart_bus_addr);
2460 return 0;
2462 err_out:
2463 if (agp_bridge.needs_scratch_page == TRUE) {
2464 agp_bridge.scratch_page &= ~(0x00000fff);
2465 agp_destroy_page((unsigned long)
2466 phys_to_virt(agp_bridge.scratch_page));
2468 if (got_gatt)
2469 agp_bridge.free_gatt_table();
2470 if (got_keylist)
2471 vfree(agp_bridge.key_list);
2472 return rc;
/* cannot be __exit b/c as it could be called from __init code */
/* Tear down everything agp_backend_initialize built, in reverse order:
 * chipset state, GATT, key list, and the scratch page (if any). */
static void agp_backend_cleanup(void)
{
	agp_bridge.cleanup();
	agp_bridge.free_gatt_table();
	vfree(agp_bridge.key_list);

	if (agp_bridge.needs_scratch_page == TRUE) {
		/* Strip the mask bits before converting back to a vaddr. */
		agp_bridge.scratch_page &= ~(0x00000fff);
		agp_destroy_page((unsigned long)
				 phys_to_virt(agp_bridge.scratch_page));
	}
}
/* Frontend (character device / ioctl interface) entry points,
 * defined in agpgart_fe.c. */
extern int agp_frontend_initialize(void);
extern void agp_frontend_cleanup(void);
/* Module entry point: bring up the backend (chipset), then the frontend
 * (userspace device).  On frontend failure the backend is torn down and
 * the bridge type reset so the module reports as unsupported. */
static int __init agp_init(void)
{
	int ret_val;

	printk(KERN_INFO "Linux agpgart interface v%d.%d (c) Jeff Hartmann\n",
	       AGPGART_VERSION_MAJOR, AGPGART_VERSION_MINOR);

	ret_val = agp_backend_initialize();
	if (ret_val) {
		agp_bridge.type = NOT_SUPPORTED;
		return ret_val;
	}
	ret_val = agp_frontend_initialize();
	if (ret_val) {
		agp_bridge.type = NOT_SUPPORTED;
		agp_backend_cleanup();
		return ret_val;
	}

	return 0;
}
/* Module exit point: tear down in reverse order of agp_init. */
static void __exit agp_cleanup(void)
{
	agp_frontend_cleanup();
	agp_backend_cleanup();
}

module_init(agp_init);
module_exit(agp_cleanup);