Merge with Linux 2.4.0-test6-pre4.
[linux-2.6/linux-mips.git] / drivers / char / agp / agpgart_be.c
blob573419a28b1e991d7a4894bf82a0cee6500820c8
1 /*
2 * AGPGART module version 0.99
3 * Copyright (C) 1999 Jeff Hartmann
4 * Copyright (C) 1999 Precision Insight, Inc.
5 * Copyright (C) 1999 Xi Graphics, Inc.
7 * Permission is hereby granted, free of charge, to any person obtaining a
8 * copy of this software and associated documentation files (the "Software"),
9 * to deal in the Software without restriction, including without limitation
10 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
11 * and/or sell copies of the Software, and to permit persons to whom the
12 * Software is furnished to do so, subject to the following conditions:
14 * The above copyright notice and this permission notice shall be included
15 * in all copies or substantial portions of the Software.
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
18 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * JEFF HARTMANN, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM,
21 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
22 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
23 * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26 #include <linux/config.h>
27 #include <linux/version.h>
28 #include <linux/module.h>
29 #include <linux/types.h>
30 #include <linux/kernel.h>
31 #include <linux/sched.h>
32 #include <linux/mm.h>
33 #include <linux/string.h>
34 #include <linux/errno.h>
35 #include <linux/malloc.h>
36 #include <linux/vmalloc.h>
37 #include <linux/pci.h>
38 #include <linux/init.h>
39 #include <linux/pagemap.h>
40 #include <linux/miscdevice.h>
41 #include <asm/system.h>
42 #include <asm/uaccess.h>
43 #include <asm/io.h>
44 #include <asm/page.h>
46 #include <linux/agp_backend.h>
47 #include "agp.h"
49 MODULE_AUTHOR("Jeff Hartmann <jhartmann@precisioninsight.com>");
50 MODULE_PARM(agp_try_unsupported, "1i");
51 EXPORT_SYMBOL(agp_free_memory);
52 EXPORT_SYMBOL(agp_allocate_memory);
53 EXPORT_SYMBOL(agp_copy_info);
54 EXPORT_SYMBOL(agp_bind_memory);
55 EXPORT_SYMBOL(agp_unbind_memory);
56 EXPORT_SYMBOL(agp_enable);
57 EXPORT_SYMBOL(agp_backend_acquire);
58 EXPORT_SYMBOL(agp_backend_release);
60 static void flush_cache(void);
62 static struct agp_bridge_data agp_bridge;
63 static int agp_try_unsupported __initdata = 0;
66 static inline void flush_cache(void)
68 #if defined(__i386__)
69 asm volatile ("wbinvd":::"memory");
70 #elif defined(__alpha__)
71 /* ??? I wonder if we'll really need to flush caches, or if the
72 core logic can manage to keep the system coherent. The ARM
73 speaks only of using `cflush' to get things in memory in
74 preparation for power failure.
76 If we do need to call `cflush', we'll need a target page,
77 as we can only flush one page at a time. */
78 mb();
79 #else
80 #error "Please define flush_cache."
81 #endif
#ifdef CONFIG_SMP
static atomic_t cpus_waiting;

/* IPI target: flush this CPU's cache, then rendezvous with the rest. */
static void ipi_handler(void *null)
{
	flush_cache();
	atomic_dec(&cpus_waiting);
	while (atomic_read(&cpus_waiting) > 0)
		barrier();
}

/* Flush the caches of every CPU in the system before touching the GATT. */
static void smp_flush_cache(void)
{
	atomic_set(&cpus_waiting, smp_num_cpus - 1);
	if (smp_call_function(ipi_handler, NULL, 1, 0) != 0)
		panic(PFX "timed out waiting for the other CPUs!\n");
	flush_cache();
	while (atomic_read(&cpus_waiting) > 0)
		barrier();
}
#define global_cache_flush smp_flush_cache
#else				/* CONFIG_SMP */
#define global_cache_flush flush_cache
#endif				/* CONFIG_SMP */
109 int agp_backend_acquire(void)
111 atomic_inc(&agp_bridge.agp_in_use);
113 if (atomic_read(&agp_bridge.agp_in_use) != 1) {
114 atomic_dec(&agp_bridge.agp_in_use);
115 return -EBUSY;
117 MOD_INC_USE_COUNT;
118 return 0;
121 void agp_backend_release(void)
123 atomic_dec(&agp_bridge.agp_in_use);
124 MOD_DEC_USE_COUNT;
128 * Basic Page Allocation Routines -
129 * These routines handle page allocation
130 * and by default they reserve the allocated
131 * memory. They also handle incrementing the
132  *      current_memory_agp value, which is checked
133 * against a maximum value.
136 static unsigned long agp_alloc_page(void)
138 void *pt;
140 pt = (void *) __get_free_page(GFP_KERNEL);
141 if (pt == NULL) {
142 return 0;
144 atomic_inc(&mem_map[MAP_NR(pt)].count);
145 set_bit(PG_locked, &mem_map[MAP_NR(pt)].flags);
146 atomic_inc(&agp_bridge.current_memory_agp);
147 return (unsigned long) pt;
150 static void agp_destroy_page(unsigned long page)
152 void *pt = (void *) page;
154 if (pt == NULL) {
155 return;
157 atomic_dec(&mem_map[MAP_NR(pt)].count);
158 clear_bit(PG_locked, &mem_map[MAP_NR(pt)].flags);
159 wake_up(&mem_map[MAP_NR(pt)].wait);
160 free_page((unsigned long) pt);
161 atomic_dec(&agp_bridge.current_memory_agp);
164 /* End Basic Page Allocation Routines */
167 * Generic routines for handling agp_memory structures -
168 * They use the basic page allocation routines to do the
169 * brunt of the work.
173 static void agp_free_key(int key)
176 if (key < 0) {
177 return;
179 if (key < MAXKEY) {
180 clear_bit(key, agp_bridge.key_list);
184 static int agp_get_key(void)
186 int bit;
188 bit = find_first_zero_bit(agp_bridge.key_list, MAXKEY);
189 if (bit < MAXKEY) {
190 set_bit(bit, agp_bridge.key_list);
191 return bit;
193 return -1;
196 static agp_memory *agp_create_memory(int scratch_pages)
198 agp_memory *new;
200 new = kmalloc(sizeof(agp_memory), GFP_KERNEL);
202 if (new == NULL) {
203 return NULL;
205 memset(new, 0, sizeof(agp_memory));
206 new->key = agp_get_key();
208 if (new->key < 0) {
209 kfree(new);
210 return NULL;
212 new->memory = vmalloc(PAGE_SIZE * scratch_pages);
214 if (new->memory == NULL) {
215 agp_free_key(new->key);
216 kfree(new);
217 return NULL;
219 new->num_scratch_pages = scratch_pages;
220 return new;
223 void agp_free_memory(agp_memory * curr)
225 int i;
227 if (curr == NULL) {
228 return;
230 if (curr->is_bound == TRUE) {
231 agp_unbind_memory(curr);
233 if (curr->type != 0) {
234 agp_bridge.free_by_type(curr);
235 return;
237 if (curr->page_count != 0) {
238 for (i = 0; i < curr->page_count; i++) {
239 curr->memory[i] &= ~(0x00000fff);
240 agp_destroy_page((unsigned long)
241 phys_to_virt(curr->memory[i]));
244 agp_free_key(curr->key);
245 vfree(curr->memory);
246 kfree(curr);
247 MOD_DEC_USE_COUNT;
250 #define ENTRIES_PER_PAGE (PAGE_SIZE / sizeof(unsigned long))
252 agp_memory *agp_allocate_memory(size_t page_count, u32 type)
254 int scratch_pages;
255 agp_memory *new;
256 int i;
258 if ((atomic_read(&agp_bridge.current_memory_agp) + page_count) >
259 agp_bridge.max_memory_agp) {
260 return NULL;
263 if (type != 0) {
264 new = agp_bridge.alloc_by_type(page_count, type);
265 return new;
267 /* We always increase the module count, since free auto-decrements
268 * it
271 MOD_INC_USE_COUNT;
273 scratch_pages = (page_count + ENTRIES_PER_PAGE - 1) / ENTRIES_PER_PAGE;
275 new = agp_create_memory(scratch_pages);
277 if (new == NULL) {
278 MOD_DEC_USE_COUNT;
279 return NULL;
281 for (i = 0; i < page_count; i++) {
282 new->memory[i] = agp_alloc_page();
284 if (new->memory[i] == 0) {
285 /* Free this structure */
286 agp_free_memory(new);
287 return NULL;
289 new->memory[i] =
290 agp_bridge.mask_memory(
291 virt_to_phys((void *) new->memory[i]),
292 type);
293 new->page_count++;
296 return new;
299 /* End - Generic routines for handling agp_memory structures */
301 static int agp_return_size(void)
303 int current_size;
304 void *temp;
306 temp = agp_bridge.current_size;
308 switch (agp_bridge.size_type) {
309 case U8_APER_SIZE:
310 current_size = A_SIZE_8(temp)->size;
311 break;
312 case U16_APER_SIZE:
313 current_size = A_SIZE_16(temp)->size;
314 break;
315 case U32_APER_SIZE:
316 current_size = A_SIZE_32(temp)->size;
317 break;
318 case LVL2_APER_SIZE:
319 current_size = A_SIZE_LVL2(temp)->size;
320 break;
321 case FIXED_APER_SIZE:
322 current_size = A_SIZE_FIX(temp)->size;
323 break;
324 default:
325 current_size = 0;
326 break;
329 return current_size;
332 /* Routine to copy over information structure */
334 void agp_copy_info(agp_kern_info * info)
336 memset(info, 0, sizeof(agp_kern_info));
337 info->version.major = agp_bridge.version->major;
338 info->version.minor = agp_bridge.version->minor;
339 info->device = agp_bridge.dev;
340 info->chipset = agp_bridge.type;
341 info->mode = agp_bridge.mode;
342 info->aper_base = agp_bridge.gart_bus_addr;
343 info->aper_size = agp_return_size();
344 info->max_memory = agp_bridge.max_memory_agp;
345 info->current_memory = atomic_read(&agp_bridge.current_memory_agp);
348 /* End - Routine to copy over information structure */
351 * Routines for handling swapping of agp_memory into the GATT -
352 * These routines take agp_memory and insert them into the GATT.
353 * They call device specific routines to actually write to the GATT.
356 int agp_bind_memory(agp_memory * curr, off_t pg_start)
358 int ret_val;
360 if ((curr == NULL) || (curr->is_bound == TRUE)) {
361 return -EINVAL;
363 if (curr->is_flushed == FALSE) {
364 CACHE_FLUSH();
365 curr->is_flushed = TRUE;
367 ret_val = agp_bridge.insert_memory(curr, pg_start, curr->type);
369 if (ret_val != 0) {
370 return ret_val;
372 curr->is_bound = TRUE;
373 curr->pg_start = pg_start;
374 return 0;
377 int agp_unbind_memory(agp_memory * curr)
379 int ret_val;
381 if (curr == NULL) {
382 return -EINVAL;
384 if (curr->is_bound != TRUE) {
385 return -EINVAL;
387 ret_val = agp_bridge.remove_memory(curr, curr->pg_start, curr->type);
389 if (ret_val != 0) {
390 return ret_val;
392 curr->is_bound = FALSE;
393 curr->pg_start = 0;
394 return 0;
397 /* End - Routines for handling swapping of agp_memory into the GATT */
400 * Driver routines - start
401 * Currently this module supports the
402 * i810, 440lx, 440bx, 440gx, via vp3, via mvp3,
403 * amd irongate, ALi M1541 and generic support for the
404 * SiS chipsets.
407 /* Generic Agp routines - Start */
409 static void agp_generic_agp_enable(u32 mode)
411 struct pci_dev *device = NULL;
412 u32 command, scratch, cap_id;
413 u8 cap_ptr;
415 pci_read_config_dword(agp_bridge.dev,
416 agp_bridge.capndx + 4,
417 &command);
420 * PASS1: go throu all devices that claim to be
421 * AGP devices and collect their data.
424 while ((device = pci_find_class(PCI_CLASS_DISPLAY_VGA << 8,
425 device)) != NULL) {
426 pci_read_config_dword(device, 0x04, &scratch);
428 if (!(scratch & 0x00100000))
429 continue;
431 pci_read_config_byte(device, 0x34, &cap_ptr);
433 if (cap_ptr != 0x00) {
434 do {
435 pci_read_config_dword(device,
436 cap_ptr, &cap_id);
438 if ((cap_id & 0xff) != 0x02)
439 cap_ptr = (cap_id >> 8) & 0xff;
441 while (((cap_id & 0xff) != 0x02) && (cap_ptr != 0x00));
443 if (cap_ptr != 0x00) {
445 * Ok, here we have a AGP device. Disable impossible
446 * settings, and adjust the readqueue to the minimum.
449 pci_read_config_dword(device, cap_ptr + 4, &scratch);
451 /* adjust RQ depth */
452 command =
453 ((command & ~0xff000000) |
454 min((mode & 0xff000000),
455 min((command & 0xff000000),
456 (scratch & 0xff000000))));
458 /* disable SBA if it's not supported */
459 if (!((command & 0x00000200) &&
460 (scratch & 0x00000200) &&
461 (mode & 0x00000200)))
462 command &= ~0x00000200;
464 /* disable FW if it's not supported */
465 if (!((command & 0x00000010) &&
466 (scratch & 0x00000010) &&
467 (mode & 0x00000010)))
468 command &= ~0x00000010;
470 if (!((command & 4) &&
471 (scratch & 4) &&
472 (mode & 4)))
473 command &= ~0x00000004;
475 if (!((command & 2) &&
476 (scratch & 2) &&
477 (mode & 2)))
478 command &= ~0x00000002;
480 if (!((command & 1) &&
481 (scratch & 1) &&
482 (mode & 1)))
483 command &= ~0x00000001;
487 * PASS2: Figure out the 4X/2X/1X setting and enable the
488 * target (our motherboard chipset).
491 if (command & 4) {
492 command &= ~3; /* 4X */
494 if (command & 2) {
495 command &= ~5; /* 2X */
497 if (command & 1) {
498 command &= ~6; /* 1X */
500 command |= 0x00000100;
502 pci_write_config_dword(agp_bridge.dev,
503 agp_bridge.capndx + 8,
504 command);
507 * PASS3: Go throu all AGP devices and update the
508 * command registers.
511 while ((device = pci_find_class(PCI_CLASS_DISPLAY_VGA << 8,
512 device)) != NULL) {
513 pci_read_config_dword(device, 0x04, &scratch);
515 if (!(scratch & 0x00100000))
516 continue;
518 pci_read_config_byte(device, 0x34, &cap_ptr);
520 if (cap_ptr != 0x00) {
521 do {
522 pci_read_config_dword(device,
523 cap_ptr, &cap_id);
525 if ((cap_id & 0xff) != 0x02)
526 cap_ptr = (cap_id >> 8) & 0xff;
528 while (((cap_id & 0xff) != 0x02) && (cap_ptr != 0x00));
530 if (cap_ptr != 0x00)
531 pci_write_config_dword(device, cap_ptr + 8, command);
535 static int agp_generic_create_gatt_table(void)
537 char *table;
538 char *table_end;
539 int size;
540 int page_order;
541 int num_entries;
542 int i;
543 void *temp;
545 /* The generic routines can't handle 2 level gatt's */
546 if (agp_bridge.size_type == LVL2_APER_SIZE) {
547 return -EINVAL;
550 table = NULL;
551 i = agp_bridge.aperture_size_idx;
552 temp = agp_bridge.current_size;
553 size = page_order = num_entries = 0;
555 if (agp_bridge.size_type != FIXED_APER_SIZE) {
556 do {
557 switch (agp_bridge.size_type) {
558 case U8_APER_SIZE:
559 size = A_SIZE_8(temp)->size;
560 page_order =
561 A_SIZE_8(temp)->page_order;
562 num_entries =
563 A_SIZE_8(temp)->num_entries;
564 break;
565 case U16_APER_SIZE:
566 size = A_SIZE_16(temp)->size;
567 page_order = A_SIZE_16(temp)->page_order;
568 num_entries = A_SIZE_16(temp)->num_entries;
569 break;
570 case U32_APER_SIZE:
571 size = A_SIZE_32(temp)->size;
572 page_order = A_SIZE_32(temp)->page_order;
573 num_entries = A_SIZE_32(temp)->num_entries;
574 break;
575 /* This case will never really happen. */
576 case FIXED_APER_SIZE:
577 case LVL2_APER_SIZE:
578 default:
579 size = page_order = num_entries = 0;
580 break;
583 table = (char *) __get_free_pages(GFP_KERNEL,
584 page_order);
586 if (table == NULL) {
587 i++;
588 switch (agp_bridge.size_type) {
589 case U8_APER_SIZE:
590 agp_bridge.current_size = A_IDX8();
591 break;
592 case U16_APER_SIZE:
593 agp_bridge.current_size = A_IDX16();
594 break;
595 case U32_APER_SIZE:
596 agp_bridge.current_size = A_IDX32();
597 break;
598 /* This case will never really
599 * happen.
601 case FIXED_APER_SIZE:
602 case LVL2_APER_SIZE:
603 default:
604 agp_bridge.current_size =
605 agp_bridge.current_size;
606 break;
608 } else {
609 agp_bridge.aperture_size_idx = i;
611 } while ((table == NULL) &&
612 (i < agp_bridge.num_aperture_sizes));
613 } else {
614 size = ((aper_size_info_fixed *) temp)->size;
615 page_order = ((aper_size_info_fixed *) temp)->page_order;
616 num_entries = ((aper_size_info_fixed *) temp)->num_entries;
617 table = (char *) __get_free_pages(GFP_KERNEL, page_order);
620 if (table == NULL) {
621 return -ENOMEM;
623 table_end = table + ((PAGE_SIZE * (1 << page_order)) - 1);
625 for (i = MAP_NR(table); i < MAP_NR(table_end); i++) {
626 set_bit(PG_reserved, &mem_map[i].flags);
629 agp_bridge.gatt_table_real = (unsigned long *) table;
630 CACHE_FLUSH();
631 agp_bridge.gatt_table = ioremap_nocache(virt_to_phys(table),
632 (PAGE_SIZE * (1 << page_order)));
633 CACHE_FLUSH();
635 if (agp_bridge.gatt_table == NULL) {
636 for (i = MAP_NR(table); i < MAP_NR(table_end); i++) {
637 clear_bit(PG_reserved, &mem_map[i].flags);
640 free_pages((unsigned long) table, page_order);
642 return -ENOMEM;
644 agp_bridge.gatt_bus_addr = virt_to_phys(agp_bridge.gatt_table_real);
646 for (i = 0; i < num_entries; i++) {
647 agp_bridge.gatt_table[i] =
648 (unsigned long) agp_bridge.scratch_page;
651 return 0;
654 static int agp_generic_free_gatt_table(void)
656 int i;
657 int page_order;
658 char *table, *table_end;
659 void *temp;
661 temp = agp_bridge.current_size;
663 switch (agp_bridge.size_type) {
664 case U8_APER_SIZE:
665 page_order = A_SIZE_8(temp)->page_order;
666 break;
667 case U16_APER_SIZE:
668 page_order = A_SIZE_16(temp)->page_order;
669 break;
670 case U32_APER_SIZE:
671 page_order = A_SIZE_32(temp)->page_order;
672 break;
673 case FIXED_APER_SIZE:
674 page_order = A_SIZE_FIX(temp)->page_order;
675 break;
676 case LVL2_APER_SIZE:
677 /* The generic routines can't deal with 2 level gatt's */
678 return -EINVAL;
679 break;
680 default:
681 page_order = 0;
682 break;
685 /* Do not worry about freeing memory, because if this is
686 * called, then all agp memory is deallocated and removed
687 * from the table.
690 iounmap(agp_bridge.gatt_table);
691 table = (char *) agp_bridge.gatt_table_real;
692 table_end = table + ((PAGE_SIZE * (1 << page_order)) - 1);
694 for (i = MAP_NR(table); i < MAP_NR(table_end); i++) {
695 clear_bit(PG_reserved, &mem_map[i].flags);
698 free_pages((unsigned long) agp_bridge.gatt_table_real, page_order);
699 return 0;
702 static int agp_generic_insert_memory(agp_memory * mem,
703 off_t pg_start, int type)
705 int i, j, num_entries;
706 void *temp;
708 temp = agp_bridge.current_size;
710 switch (agp_bridge.size_type) {
711 case U8_APER_SIZE:
712 num_entries = A_SIZE_8(temp)->num_entries;
713 break;
714 case U16_APER_SIZE:
715 num_entries = A_SIZE_16(temp)->num_entries;
716 break;
717 case U32_APER_SIZE:
718 num_entries = A_SIZE_32(temp)->num_entries;
719 break;
720 case FIXED_APER_SIZE:
721 num_entries = A_SIZE_FIX(temp)->num_entries;
722 break;
723 case LVL2_APER_SIZE:
724 /* The generic routines can't deal with 2 level gatt's */
725 return -EINVAL;
726 break;
727 default:
728 num_entries = 0;
729 break;
732 if (type != 0 || mem->type != 0) {
733 /* The generic routines know nothing of memory types */
734 return -EINVAL;
736 if ((pg_start + mem->page_count) > num_entries) {
737 return -EINVAL;
739 j = pg_start;
741 while (j < (pg_start + mem->page_count)) {
742 if (!PGE_EMPTY(agp_bridge.gatt_table[j])) {
743 return -EBUSY;
745 j++;
748 if (mem->is_flushed == FALSE) {
749 CACHE_FLUSH();
750 mem->is_flushed = TRUE;
752 for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
753 agp_bridge.gatt_table[j] = mem->memory[i];
756 agp_bridge.tlb_flush(mem);
757 return 0;
760 static int agp_generic_remove_memory(agp_memory * mem, off_t pg_start,
761 int type)
763 int i;
765 if (type != 0 || mem->type != 0) {
766 /* The generic routines know nothing of memory types */
767 return -EINVAL;
769 for (i = pg_start; i < (mem->page_count + pg_start); i++) {
770 agp_bridge.gatt_table[i] =
771 (unsigned long) agp_bridge.scratch_page;
774 agp_bridge.tlb_flush(mem);
775 return 0;
778 static agp_memory *agp_generic_alloc_by_type(size_t page_count, int type)
780 return NULL;
783 static void agp_generic_free_by_type(agp_memory * curr)
785 if (curr->memory != NULL) {
786 vfree(curr->memory);
788 agp_free_key(curr->key);
789 kfree(curr);
792 void agp_enable(u32 mode)
794 agp_bridge.agp_enable(mode);
797 /* End - Generic Agp routines */
799 #ifdef CONFIG_AGP_I810
800 static aper_size_info_fixed intel_i810_sizes[] =
802 {64, 16384, 4},
803 /* The 32M mode still requires a 64k gatt */
804 {32, 8192, 4}
807 #define AGP_DCACHE_MEMORY 1
808 #define AGP_PHYS_MEMORY 2
810 static gatt_mask intel_i810_masks[] =
812 {I810_PTE_VALID, 0},
813 {(I810_PTE_VALID | I810_PTE_LOCAL), AGP_DCACHE_MEMORY},
814 {I810_PTE_VALID, 0}
817 static struct _intel_i810_private {
818 struct pci_dev *i810_dev; /* device one */
819 volatile u8 *registers;
820 int num_dcache_entries;
821 } intel_i810_private;
823 static int intel_i810_fetch_size(void)
825 u32 smram_miscc;
826 aper_size_info_fixed *values;
828 pci_read_config_dword(agp_bridge.dev, I810_SMRAM_MISCC, &smram_miscc);
829 values = A_SIZE_FIX(agp_bridge.aperture_sizes);
831 if ((smram_miscc & I810_GMS) == I810_GMS_DISABLE) {
832 printk(KERN_WARNING PFX "i810 is disabled\n");
833 return 0;
835 if ((smram_miscc & I810_GFX_MEM_WIN_SIZE) == I810_GFX_MEM_WIN_32M) {
836 agp_bridge.previous_size =
837 agp_bridge.current_size = (void *) (values + 1);
838 agp_bridge.aperture_size_idx = 1;
839 return values[1].size;
840 } else {
841 agp_bridge.previous_size =
842 agp_bridge.current_size = (void *) (values);
843 agp_bridge.aperture_size_idx = 0;
844 return values[0].size;
847 return 0;
850 static int intel_i810_configure(void)
852 aper_size_info_fixed *current_size;
853 u32 temp;
854 int i;
856 current_size = A_SIZE_FIX(agp_bridge.current_size);
858 pci_read_config_dword(intel_i810_private.i810_dev, I810_MMADDR, &temp);
859 temp &= 0xfff80000;
861 intel_i810_private.registers =
862 (volatile u8 *) ioremap(temp, 128 * 4096);
864 if ((INREG32(intel_i810_private.registers, I810_DRAM_CTL)
865 & I810_DRAM_ROW_0) == I810_DRAM_ROW_0_SDRAM) {
866 /* This will need to be dynamically assigned */
867 printk(KERN_INFO PFX "detected 4MB dedicated video ram.\n");
868 intel_i810_private.num_dcache_entries = 1024;
870 pci_read_config_dword(intel_i810_private.i810_dev, I810_GMADDR, &temp);
871 agp_bridge.gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
872 OUTREG32(intel_i810_private.registers, I810_PGETBL_CTL,
873 agp_bridge.gatt_bus_addr | I810_PGETBL_ENABLED);
874 CACHE_FLUSH();
876 if (agp_bridge.needs_scratch_page == TRUE) {
877 for (i = 0; i < current_size->num_entries; i++) {
878 OUTREG32(intel_i810_private.registers,
879 I810_PTE_BASE + (i * 4),
880 agp_bridge.scratch_page);
883 return 0;
886 static void intel_i810_cleanup(void)
888 OUTREG32(intel_i810_private.registers, I810_PGETBL_CTL, 0);
889 iounmap((void *) intel_i810_private.registers);
892 static void intel_i810_tlbflush(agp_memory * mem)
894 return;
897 static void intel_i810_agp_enable(u32 mode)
899 return;
902 static int intel_i810_insert_entries(agp_memory * mem, off_t pg_start,
903 int type)
905 int i, j, num_entries;
906 void *temp;
908 temp = agp_bridge.current_size;
909 num_entries = A_SIZE_FIX(temp)->num_entries;
911 if ((pg_start + mem->page_count) > num_entries) {
912 return -EINVAL;
914 for (j = pg_start; j < (pg_start + mem->page_count); j++) {
915 if (!PGE_EMPTY(agp_bridge.gatt_table[j])) {
916 return -EBUSY;
920 if (type != 0 || mem->type != 0) {
921 if ((type == AGP_DCACHE_MEMORY) &&
922 (mem->type == AGP_DCACHE_MEMORY)) {
923 /* special insert */
924 CACHE_FLUSH();
925 for (i = pg_start;
926 i < (pg_start + mem->page_count); i++) {
927 OUTREG32(intel_i810_private.registers,
928 I810_PTE_BASE + (i * 4),
929 (i * 4096) | I810_PTE_LOCAL |
930 I810_PTE_VALID);
932 CACHE_FLUSH();
933 agp_bridge.tlb_flush(mem);
934 return 0;
936 if((type == AGP_PHYS_MEMORY) &&
937 (mem->type == AGP_PHYS_MEMORY)) {
938 goto insert;
940 return -EINVAL;
943 insert:
944 CACHE_FLUSH();
945 for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
946 OUTREG32(intel_i810_private.registers,
947 I810_PTE_BASE + (j * 4), mem->memory[i]);
949 CACHE_FLUSH();
951 agp_bridge.tlb_flush(mem);
952 return 0;
955 static int intel_i810_remove_entries(agp_memory * mem, off_t pg_start,
956 int type)
958 int i;
960 for (i = pg_start; i < (mem->page_count + pg_start); i++) {
961 OUTREG32(intel_i810_private.registers,
962 I810_PTE_BASE + (i * 4),
963 agp_bridge.scratch_page);
966 agp_bridge.tlb_flush(mem);
967 return 0;
970 static agp_memory *intel_i810_alloc_by_type(size_t pg_count, int type)
972 agp_memory *new;
974 if (type == AGP_DCACHE_MEMORY) {
975 if (pg_count != intel_i810_private.num_dcache_entries) {
976 return NULL;
978 new = agp_create_memory(1);
980 if (new == NULL) {
981 return NULL;
983 new->type = AGP_DCACHE_MEMORY;
984 new->page_count = pg_count;
985 new->num_scratch_pages = 0;
986 vfree(new->memory);
987 MOD_INC_USE_COUNT;
988 return new;
990 if(type == AGP_PHYS_MEMORY) {
991 /* The I810 requires a physical address to program
992 * it's mouse pointer into hardware. However the
993 * Xserver still writes to it through the agp
994 * aperture
996 if (pg_count != 1) {
997 return NULL;
999 new = agp_create_memory(1);
1001 if (new == NULL) {
1002 return NULL;
1004 MOD_INC_USE_COUNT;
1005 new->memory[0] = agp_alloc_page();
1007 if (new->memory[0] == 0) {
1008 /* Free this structure */
1009 agp_free_memory(new);
1010 return NULL;
1012 new->memory[0] =
1013 agp_bridge.mask_memory(
1014 virt_to_phys((void *) new->memory[0]),
1015 type);
1016 new->page_count = 1;
1017 new->num_scratch_pages = 1;
1018 new->type = AGP_PHYS_MEMORY;
1019 new->physical = virt_to_phys((void *) new->memory[0]);
1020 return new;
1023 return NULL;
1026 static void intel_i810_free_by_type(agp_memory * curr)
1028 agp_free_key(curr->key);
1029 if(curr->type == AGP_PHYS_MEMORY) {
1030 agp_destroy_page((unsigned long)
1031 phys_to_virt(curr->memory[0]));
1032 vfree(curr->memory);
1034 kfree(curr);
1035 MOD_DEC_USE_COUNT;
1038 static unsigned long intel_i810_mask_memory(unsigned long addr, int type)
1040 /* Type checking must be done elsewhere */
1041 return addr | agp_bridge.masks[type].mask;
1044 static int __init intel_i810_setup(struct pci_dev *i810_dev)
1046 intel_i810_private.i810_dev = i810_dev;
1048 agp_bridge.masks = intel_i810_masks;
1049 agp_bridge.num_of_masks = 2;
1050 agp_bridge.aperture_sizes = (void *) intel_i810_sizes;
1051 agp_bridge.size_type = FIXED_APER_SIZE;
1052 agp_bridge.num_aperture_sizes = 2;
1053 agp_bridge.dev_private_data = (void *) &intel_i810_private;
1054 agp_bridge.needs_scratch_page = TRUE;
1055 agp_bridge.configure = intel_i810_configure;
1056 agp_bridge.fetch_size = intel_i810_fetch_size;
1057 agp_bridge.cleanup = intel_i810_cleanup;
1058 agp_bridge.tlb_flush = intel_i810_tlbflush;
1059 agp_bridge.mask_memory = intel_i810_mask_memory;
1060 agp_bridge.agp_enable = intel_i810_agp_enable;
1061 agp_bridge.cache_flush = global_cache_flush;
1062 agp_bridge.create_gatt_table = agp_generic_create_gatt_table;
1063 agp_bridge.free_gatt_table = agp_generic_free_gatt_table;
1064 agp_bridge.insert_memory = intel_i810_insert_entries;
1065 agp_bridge.remove_memory = intel_i810_remove_entries;
1066 agp_bridge.alloc_by_type = intel_i810_alloc_by_type;
1067 agp_bridge.free_by_type = intel_i810_free_by_type;
1069 return 0;
1072 #endif /* CONFIG_AGP_I810 */
1074 #ifdef CONFIG_AGP_INTEL
1076 static int intel_fetch_size(void)
1078 int i;
1079 u16 temp;
1080 aper_size_info_16 *values;
1082 pci_read_config_word(agp_bridge.dev, INTEL_APSIZE, &temp);
1083 values = A_SIZE_16(agp_bridge.aperture_sizes);
1085 for (i = 0; i < agp_bridge.num_aperture_sizes; i++) {
1086 if (temp == values[i].size_value) {
1087 agp_bridge.previous_size =
1088 agp_bridge.current_size = (void *) (values + i);
1089 agp_bridge.aperture_size_idx = i;
1090 return values[i].size;
1094 return 0;
1097 static void intel_tlbflush(agp_memory * mem)
1099 pci_write_config_dword(agp_bridge.dev, INTEL_AGPCTRL, 0x2200);
1100 pci_write_config_dword(agp_bridge.dev, INTEL_AGPCTRL, 0x2280);
1103 static void intel_cleanup(void)
1105 u16 temp;
1106 aper_size_info_16 *previous_size;
1108 previous_size = A_SIZE_16(agp_bridge.previous_size);
1109 pci_read_config_word(agp_bridge.dev, INTEL_NBXCFG, &temp);
1110 pci_write_config_word(agp_bridge.dev, INTEL_NBXCFG, temp & ~(1 << 9));
1111 pci_write_config_word(agp_bridge.dev, INTEL_APSIZE,
1112 previous_size->size_value);
1115 static int intel_configure(void)
1117 u32 temp;
1118 u16 temp2;
1119 aper_size_info_16 *current_size;
1121 current_size = A_SIZE_16(agp_bridge.current_size);
1123 /* aperture size */
1124 pci_write_config_word(agp_bridge.dev, INTEL_APSIZE,
1125 current_size->size_value);
1127 /* address to map to */
1128 pci_read_config_dword(agp_bridge.dev, INTEL_APBASE, &temp);
1129 agp_bridge.gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
1131 /* attbase - aperture base */
1132 pci_write_config_dword(agp_bridge.dev, INTEL_ATTBASE,
1133 agp_bridge.gatt_bus_addr);
1135 /* agpctrl */
1136 pci_write_config_dword(agp_bridge.dev, INTEL_AGPCTRL, 0x2280);
1138 /* paccfg/nbxcfg */
1139 pci_read_config_word(agp_bridge.dev, INTEL_NBXCFG, &temp2);
1140 pci_write_config_word(agp_bridge.dev, INTEL_NBXCFG,
1141 (temp2 & ~(1 << 10)) | (1 << 9));
1142 /* clear any possible error conditions */
1143 pci_write_config_byte(agp_bridge.dev, INTEL_ERRSTS + 1, 7);
1144 return 0;
1147 static int intel_840_configure(void)
1149 u32 temp;
1150 u16 temp2;
1151 aper_size_info_16 *current_size;
1153 current_size = A_SIZE_16(agp_bridge.current_size);
1155 /* aperture size */
1156 pci_write_config_byte(agp_bridge.dev, INTEL_APSIZE,
1157 (char)current_size->size_value);
1159 /* address to map to */
1160 pci_read_config_dword(agp_bridge.dev, INTEL_APBASE, &temp);
1161 agp_bridge.gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
1163 /* attbase - aperture base */
1164 pci_write_config_dword(agp_bridge.dev, INTEL_ATTBASE,
1165 agp_bridge.gatt_bus_addr);
1167 /* agpctrl */
1168 pci_write_config_dword(agp_bridge.dev, INTEL_AGPCTRL, 0x0000);
1170 /* mcgcfg */
1171 pci_read_config_word(agp_bridge.dev, INTEL_I840_MCHCFG, &temp2);
1172 pci_write_config_word(agp_bridge.dev, INTEL_I840_MCHCFG,
1173 temp2 | (1 << 9));
1174 /* clear any possible error conditions */
1175 pci_write_config_word(agp_bridge.dev, INTEL_I840_ERRSTS, 0xc000);
1176 return 0;
1179 static unsigned long intel_mask_memory(unsigned long addr, int type)
1181 /* Memory type is ignored */
1183 return addr | agp_bridge.masks[0].mask;
1187 /* Setup function */
1188 static gatt_mask intel_generic_masks[] =
1190 {0x00000017, 0}
1193 static aper_size_info_16 intel_generic_sizes[7] =
1195 {256, 65536, 6, 0},
1196 {128, 32768, 5, 32},
1197 {64, 16384, 4, 48},
1198 {32, 8192, 3, 56},
1199 {16, 4096, 2, 60},
1200 {8, 2048, 1, 62},
1201 {4, 1024, 0, 63}
1204 static int __init intel_generic_setup (struct pci_dev *pdev)
1206 agp_bridge.masks = intel_generic_masks;
1207 agp_bridge.num_of_masks = 1;
1208 agp_bridge.aperture_sizes = (void *) intel_generic_sizes;
1209 agp_bridge.size_type = U16_APER_SIZE;
1210 agp_bridge.num_aperture_sizes = 7;
1211 agp_bridge.dev_private_data = NULL;
1212 agp_bridge.needs_scratch_page = FALSE;
1213 agp_bridge.configure = intel_configure;
1214 agp_bridge.fetch_size = intel_fetch_size;
1215 agp_bridge.cleanup = intel_cleanup;
1216 agp_bridge.tlb_flush = intel_tlbflush;
1217 agp_bridge.mask_memory = intel_mask_memory;
1218 agp_bridge.agp_enable = agp_generic_agp_enable;
1219 agp_bridge.cache_flush = global_cache_flush;
1220 agp_bridge.create_gatt_table = agp_generic_create_gatt_table;
1221 agp_bridge.free_gatt_table = agp_generic_free_gatt_table;
1222 agp_bridge.insert_memory = agp_generic_insert_memory;
1223 agp_bridge.remove_memory = agp_generic_remove_memory;
1224 agp_bridge.alloc_by_type = agp_generic_alloc_by_type;
1225 agp_bridge.free_by_type = agp_generic_free_by_type;
1227 return 0;
1229 (void) pdev; /* unused */
1232 static int __init intel_840_setup (struct pci_dev *pdev)
1234 agp_bridge.masks = intel_generic_masks;
1235 agp_bridge.num_of_masks = 1;
1236 agp_bridge.aperture_sizes = (void *) intel_generic_sizes;
1237 agp_bridge.size_type = U16_APER_SIZE;
1238 agp_bridge.num_aperture_sizes = 7;
1239 agp_bridge.dev_private_data = NULL;
1240 agp_bridge.needs_scratch_page = FALSE;
1241 agp_bridge.configure = intel_840_configure;
1242 agp_bridge.fetch_size = intel_fetch_size;
1243 agp_bridge.cleanup = intel_cleanup;
1244 agp_bridge.tlb_flush = intel_tlbflush;
1245 agp_bridge.mask_memory = intel_mask_memory;
1246 agp_bridge.agp_enable = agp_generic_agp_enable;
1247 agp_bridge.cache_flush = global_cache_flush;
1248 agp_bridge.create_gatt_table = agp_generic_create_gatt_table;
1249 agp_bridge.free_gatt_table = agp_generic_free_gatt_table;
1250 agp_bridge.insert_memory = agp_generic_insert_memory;
1251 agp_bridge.remove_memory = agp_generic_remove_memory;
1252 agp_bridge.alloc_by_type = agp_generic_alloc_by_type;
1253 agp_bridge.free_by_type = agp_generic_free_by_type;
1255 return 0;
1257 (void) pdev; /* unused */
1260 #endif /* CONFIG_AGP_INTEL */
1262 #ifdef CONFIG_AGP_VIA
/* Read the programmed aperture size from the VIA APSIZE register and
 * translate it via via_generic_sizes.  Records both current and
 * previous size so cleanup can restore the original configuration.
 * Returns the aperture size in megabytes, or 0 if the register value
 * matches no table entry.
 */
static int via_fetch_size(void)
{
	int i;
	u8 temp;
	aper_size_info_8 *values;

	values = A_SIZE_8(agp_bridge.aperture_sizes);
	pci_read_config_byte(agp_bridge.dev, VIA_APSIZE, &temp);
	for (i = 0; i < agp_bridge.num_aperture_sizes; i++) {
		if (temp == values[i].size_value) {
			agp_bridge.previous_size =
			    agp_bridge.current_size = (void *) (values + i);
			agp_bridge.aperture_size_idx = i;
			return values[i].size;
		}
	}

	return 0;
}
/* Program the VIA host bridge: aperture size, GART control, and the
 * GATT base (ATTBASE).  Also records the aperture bus address.
 * Always returns 0.
 */
static int via_configure(void)
{
	u32 temp;
	aper_size_info_8 *current_size;

	current_size = A_SIZE_8(agp_bridge.current_size);
	/* aperture size */
	pci_write_config_byte(agp_bridge.dev, VIA_APSIZE,
			      current_size->size_value);
	/* address to map to */
	pci_read_config_dword(agp_bridge.dev, VIA_APBASE, &temp);
	agp_bridge.gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);

	/* GART control register */
	pci_write_config_dword(agp_bridge.dev, VIA_GARTCTRL, 0x0000000f);

	/* attbase - aperture GATT base; low bits presumably enable the
	 * table -- NOTE(review): bit meaning per VIA datasheet, not
	 * visible here */
	pci_write_config_dword(agp_bridge.dev, VIA_ATTBASE,
			       (agp_bridge.gatt_bus_addr & 0xfffff000) | 3);
	return 0;
}
/* Undo via_configure(): clear the GATT base and restore the aperture
 * size that was programmed before the driver loaded.
 */
static void via_cleanup(void)
{
	aper_size_info_8 *previous_size;

	previous_size = A_SIZE_8(agp_bridge.previous_size);
	pci_write_config_dword(agp_bridge.dev, VIA_ATTBASE, 0);
	pci_write_config_byte(agp_bridge.dev, VIA_APSIZE,
			      previous_size->size_value);
}
/* Flush the whole GART TLB by pulsing a bit in the GART control
 * register.  The agp_memory argument is unused.
 */
static void via_tlbflush(agp_memory * mem)
{
	pci_write_config_dword(agp_bridge.dev, VIA_GARTCTRL, 0x0000008f);
	pci_write_config_dword(agp_bridge.dev, VIA_GARTCTRL, 0x0000000f);
}
/* Apply the chipset mask to a physical address before it is written
 * into the GATT.  Memory type is ignored.
 */
static unsigned long via_mask_memory(unsigned long addr, int type)
{
	return addr | agp_bridge.masks[0].mask;
}
/* Aperture size table: {MB, GATT entries, page order, APSIZE value} */
static aper_size_info_8 via_generic_sizes[7] =
{
	{256, 65536, 6, 0},
	{128, 32768, 5, 128},
	{64, 16384, 4, 192},
	{32, 8192, 3, 224},
	{16, 4096, 2, 240},
	{8, 2048, 1, 248},
	{4, 1024, 0, 252}
};
/* GATT entry mask: VIA sets no extra bits in table entries */
static gatt_mask via_generic_masks[] =
{
	{0x00000000, 0}
};
1345 static int __init via_generic_setup (struct pci_dev *pdev)
1347 agp_bridge.masks = via_generic_masks;
1348 agp_bridge.num_of_masks = 1;
1349 agp_bridge.aperture_sizes = (void *) via_generic_sizes;
1350 agp_bridge.size_type = U8_APER_SIZE;
1351 agp_bridge.num_aperture_sizes = 7;
1352 agp_bridge.dev_private_data = NULL;
1353 agp_bridge.needs_scratch_page = FALSE;
1354 agp_bridge.configure = via_configure;
1355 agp_bridge.fetch_size = via_fetch_size;
1356 agp_bridge.cleanup = via_cleanup;
1357 agp_bridge.tlb_flush = via_tlbflush;
1358 agp_bridge.mask_memory = via_mask_memory;
1359 agp_bridge.agp_enable = agp_generic_agp_enable;
1360 agp_bridge.cache_flush = global_cache_flush;
1361 agp_bridge.create_gatt_table = agp_generic_create_gatt_table;
1362 agp_bridge.free_gatt_table = agp_generic_free_gatt_table;
1363 agp_bridge.insert_memory = agp_generic_insert_memory;
1364 agp_bridge.remove_memory = agp_generic_remove_memory;
1365 agp_bridge.alloc_by_type = agp_generic_alloc_by_type;
1366 agp_bridge.free_by_type = agp_generic_free_by_type;
1368 return 0;
1370 (void) pdev; /* unused */
1373 #endif /* CONFIG_AGP_VIA */
1375 #ifdef CONFIG_AGP_SIS
/* Determine the current SiS aperture size from APSIZE.  The low two
 * register bits are also accepted when masked off (they are not part
 * of the size encoding -- see sis_cleanup, which clears them).
 * Returns the size in megabytes, or 0 if nothing matched.
 */
static int sis_fetch_size(void)
{
	u8 temp_size;
	int i;
	aper_size_info_8 *values;

	pci_read_config_byte(agp_bridge.dev, SIS_APSIZE, &temp_size);
	values = A_SIZE_8(agp_bridge.aperture_sizes);
	for (i = 0; i < agp_bridge.num_aperture_sizes; i++) {
		if ((temp_size == values[i].size_value) ||
		    ((temp_size & ~(0x03)) ==
		     (values[i].size_value & ~(0x03)))) {
			agp_bridge.previous_size =
			    agp_bridge.current_size = (void *) (values + i);

			agp_bridge.aperture_size_idx = i;
			return values[i].size;
		}
	}

	return 0;
}
/* Flush the whole SiS GART TLB.  The agp_memory argument is unused. */
static void sis_tlbflush(agp_memory * mem)
{
	pci_write_config_byte(agp_bridge.dev, SIS_TLBFLUSH, 0x02);
}
/* Program the SiS host bridge: TLB control, GATT base and aperture
 * size; also records the aperture bus address.  Always returns 0.
 */
static int sis_configure(void)
{
	u32 temp;
	aper_size_info_8 *current_size;

	current_size = A_SIZE_8(agp_bridge.current_size);
	/* TLB control -- NOTE(review): 0x05 per SiS datasheet, meaning
	 * not visible here */
	pci_write_config_byte(agp_bridge.dev, SIS_TLBCNTRL, 0x05);
	pci_read_config_dword(agp_bridge.dev, SIS_APBASE, &temp);
	agp_bridge.gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
	pci_write_config_dword(agp_bridge.dev, SIS_ATTBASE,
			       agp_bridge.gatt_bus_addr);
	pci_write_config_byte(agp_bridge.dev, SIS_APSIZE,
			      current_size->size_value);
	return 0;
}
/* Restore the pre-load aperture size, with the low two (control)
 * bits cleared.
 */
static void sis_cleanup(void)
{
	aper_size_info_8 *previous_size;

	previous_size = A_SIZE_8(agp_bridge.previous_size);
	pci_write_config_byte(agp_bridge.dev, SIS_APSIZE,
			      (previous_size->size_value & ~(0x03)));
}
/* Apply the chipset mask to a physical address before it is written
 * into the GATT.  Memory type is ignored.
 */
static unsigned long sis_mask_memory(unsigned long addr, int type)
{
	return addr | agp_bridge.masks[0].mask;
}
/* Aperture size table: {MB, GATT entries, page order, APSIZE value} */
static aper_size_info_8 sis_generic_sizes[7] =
{
	{256, 65536, 6, 99},
	{128, 32768, 5, 83},
	{64, 16384, 4, 67},
	{32, 8192, 3, 51},
	{16, 4096, 2, 35},
	{8, 2048, 1, 19},
	{4, 1024, 0, 3}
};
/* GATT entry mask: SiS sets no extra bits in table entries */
static gatt_mask sis_generic_masks[] =
{
	{0x00000000, 0}
};
1454 static int __init sis_generic_setup (struct pci_dev *pdev)
1456 agp_bridge.masks = sis_generic_masks;
1457 agp_bridge.num_of_masks = 1;
1458 agp_bridge.aperture_sizes = (void *) sis_generic_sizes;
1459 agp_bridge.size_type = U8_APER_SIZE;
1460 agp_bridge.num_aperture_sizes = 7;
1461 agp_bridge.dev_private_data = NULL;
1462 agp_bridge.needs_scratch_page = FALSE;
1463 agp_bridge.configure = sis_configure;
1464 agp_bridge.fetch_size = sis_fetch_size;
1465 agp_bridge.cleanup = sis_cleanup;
1466 agp_bridge.tlb_flush = sis_tlbflush;
1467 agp_bridge.mask_memory = sis_mask_memory;
1468 agp_bridge.agp_enable = agp_generic_agp_enable;
1469 agp_bridge.cache_flush = global_cache_flush;
1470 agp_bridge.create_gatt_table = agp_generic_create_gatt_table;
1471 agp_bridge.free_gatt_table = agp_generic_free_gatt_table;
1472 agp_bridge.insert_memory = agp_generic_insert_memory;
1473 agp_bridge.remove_memory = agp_generic_remove_memory;
1474 agp_bridge.alloc_by_type = agp_generic_alloc_by_type;
1475 agp_bridge.free_by_type = agp_generic_free_by_type;
1477 return 0;
1480 #endif /* CONFIG_AGP_SIS */
1482 #ifdef CONFIG_AGP_AMD
/* One GATT page: its kernel virtual address and the uncached
 * remapping actually used for entry writes.
 */
typedef struct _amd_page_map {
	unsigned long *real;
	unsigned long *remapped;
} amd_page_map;

/* Chipset-private state for the AMD Irongate */
static struct _amd_irongate_private {
	volatile u8 *registers;		/* memory-mapped control registers */
	amd_page_map **gatt_pages;	/* second-level GATT pages */
	int num_tables;			/* entries in gatt_pages */
} amd_irongate_private;
/* Allocate one page for a GATT table, mark it reserved, remap it
 * uncached, and fill every entry with the scratch page so stray
 * aperture accesses hit harmless memory.  Returns 0 or -ENOMEM; on
 * failure page_map->real is left NULL.
 */
static int amd_create_page_map(amd_page_map *page_map)
{
	int i;

	page_map->real = (unsigned long *) __get_free_page(GFP_KERNEL);
	if (page_map->real == NULL) {
		return -ENOMEM;
	}
	set_bit(PG_reserved, &mem_map[MAP_NR(page_map->real)].flags);
	CACHE_FLUSH();
	page_map->remapped = ioremap_nocache(virt_to_phys(page_map->real),
					    PAGE_SIZE);
	if (page_map->remapped == NULL) {
		/* undo the allocation; caller sees a clean failure */
		clear_bit(PG_reserved,
			  &mem_map[MAP_NR(page_map->real)].flags);
		free_page((unsigned long) page_map->real);
		page_map->real = NULL;
		return -ENOMEM;
	}
	CACHE_FLUSH();

	for(i = 0; i < PAGE_SIZE / sizeof(unsigned long); i++) {
		page_map->remapped[i] = agp_bridge.scratch_page;
	}

	return 0;
}
/* Release a page set up by amd_create_page_map(): unmap, unreserve,
 * free.
 */
static void amd_free_page_map(amd_page_map *page_map)
{
	iounmap(page_map->remapped);
	clear_bit(PG_reserved,
		  &mem_map[MAP_NR(page_map->real)].flags);
	free_page((unsigned long) page_map->real);
}
/* Free all second-level GATT pages and the pointer array.  Safe on a
 * partially built table: NULL slots and entries whose page allocation
 * failed (real == NULL) are skipped.
 */
static void amd_free_gatt_pages(void)
{
	int i;
	amd_page_map **tables;
	amd_page_map *entry;

	tables = amd_irongate_private.gatt_pages;
	for(i = 0; i < amd_irongate_private.num_tables; i++) {
		entry = tables[i];
		if (entry != NULL) {
			if (entry->real != NULL) {
				amd_free_page_map(entry);
			}
			kfree(entry);
		}
	}
	kfree(tables);
}
/* Allocate nr_tables second-level GATT pages into a NULL-terminated
 * pointer array.  On any failure, everything built so far is torn
 * down and the error (-ENOMEM or whatever amd_create_page_map
 * returned) is passed back.
 */
static int amd_create_gatt_pages(int nr_tables)
{
	amd_page_map **tables;
	amd_page_map *entry;
	int retval = 0;
	int i;

	tables = kmalloc((nr_tables + 1) * sizeof(amd_page_map *),
			 GFP_KERNEL);
	if (tables == NULL) {
		return -ENOMEM;
	}
	memset(tables, 0, sizeof(amd_page_map *) * (nr_tables + 1));
	for (i = 0; i < nr_tables; i++) {
		entry = kmalloc(sizeof(amd_page_map), GFP_KERNEL);
		if (entry == NULL) {
			retval = -ENOMEM;
			break;
		}
		memset(entry, 0, sizeof(amd_page_map));
		tables[i] = entry;
		retval = amd_create_page_map(entry);
		if (retval != 0) break;
	}
	/* publish before the error path so amd_free_gatt_pages() can
	 * walk whatever was built */
	amd_irongate_private.num_tables = nr_tables;
	amd_irongate_private.gatt_pages = tables;

	if (retval != 0) amd_free_gatt_pages();

	return retval;
}
/* Since we don't need contiguous memory we just try
 * to get the gatt table once
 */

/* Two-level GATT addressing helpers: bits 31-22 of an aperture
 * address select the page-directory entry (one per 4MB), bits 21-12
 * select the entry within that GATT page.
 */
#define GET_PAGE_DIR_OFF(addr) (addr >> 22)
#define GET_PAGE_DIR_IDX(addr) (GET_PAGE_DIR_OFF(addr) - \
	GET_PAGE_DIR_OFF(agp_bridge.gart_bus_addr))
#define GET_GATT_OFF(addr) ((addr & 0x003ff000) >> 12)
#define GET_GATT(addr) (amd_irongate_private.gatt_pages[\
	GET_PAGE_DIR_IDX(addr)]->remapped)
/* Build the two-level Irongate GATT: a page directory whose entries
 * point at one GATT page per 4MB of aperture, with bit 0 set as the
 * valid bit.  Returns 0 or a negative errno.
 */
static int amd_create_gatt_table(void)
{
	aper_size_info_lvl2 *value;
	amd_page_map page_dir;
	unsigned long addr;
	int retval;
	u32 temp;
	int i;

	value = A_SIZE_LVL2(agp_bridge.current_size);
	retval = amd_create_page_map(&page_dir);
	if (retval != 0) {
		return retval;
	}
	/* one GATT page holds 1024 entries (covers 4MB of aperture) */
	retval = amd_create_gatt_pages(value->num_entries / 1024);
	if (retval != 0) {
		amd_free_page_map(&page_dir);
		return retval;
	}
	agp_bridge.gatt_table_real = page_dir.real;
	agp_bridge.gatt_table = page_dir.remapped;
	agp_bridge.gatt_bus_addr = virt_to_bus(page_dir.real);

	/* Get the address for the gart region.
	 * This is a bus address even on the alpha, b/c its
	 * used to program the agp master not the cpu
	 */

	pci_read_config_dword(agp_bridge.dev, AMD_APBASE, &temp);
	addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
	agp_bridge.gart_bus_addr = addr;

	/* Calculate the agp offset */
	for(i = 0; i < value->num_entries / 1024; i++, addr += 0x00400000) {
		page_dir.remapped[GET_PAGE_DIR_OFF(addr)] =
		    virt_to_bus(amd_irongate_private.gatt_pages[i]->real);
		page_dir.remapped[GET_PAGE_DIR_OFF(addr)] |= 0x00000001;
	}

	return 0;
}
/* Free the two-level GATT built by amd_create_gatt_table().
 * Always returns 0.
 */
static int amd_free_gatt_table(void)
{
	amd_page_map page_dir;

	page_dir.real = agp_bridge.gatt_table_real;
	page_dir.remapped = agp_bridge.gatt_table;

	amd_free_gatt_pages();
	amd_free_page_map(&page_dir);
	return 0;
}
/* Read the aperture-size field (bits 1-3) of APSIZE and translate it
 * via amd_irongate_sizes.  Returns the size in megabytes, or 0 if
 * the value matches no table entry.
 */
static int amd_irongate_fetch_size(void)
{
	int i;
	u32 temp;
	aper_size_info_lvl2 *values;

	pci_read_config_dword(agp_bridge.dev, AMD_APSIZE, &temp);
	temp = (temp & 0x0000000e);
	values = A_SIZE_LVL2(agp_bridge.aperture_sizes);
	for (i = 0; i < agp_bridge.num_aperture_sizes; i++) {
		if (temp == values[i].size_value) {
			agp_bridge.previous_size =
			    agp_bridge.current_size = (void *) (values + i);

			agp_bridge.aperture_size_idx = i;
			return values[i].size;
		}
	}

	return 0;
}
/* Program the Irongate: map its control registers, install the GATT
 * base, enable the GART and flush the TLB.  Always returns 0.
 */
static int amd_irongate_configure(void)
{
	aper_size_info_lvl2 *current_size;
	u32 temp;
	u16 enable_reg;

	current_size = A_SIZE_LVL2(agp_bridge.current_size);

	/* Get the memory mapped registers */
	pci_read_config_dword(agp_bridge.dev, AMD_MMBASE, &temp);
	temp = (temp & PCI_BASE_ADDRESS_MEM_MASK);
	amd_irongate_private.registers = (volatile u8 *) ioremap(temp, 4096);

	/* Write out the address of the gatt table */
	OUTREG32(amd_irongate_private.registers, AMD_ATTBASE,
		 agp_bridge.gatt_bus_addr);

	/* Write the Sync register */
	pci_write_config_byte(agp_bridge.dev, AMD_MODECNTL, 0x80);

	/* Set indexing mode */
	pci_write_config_byte(agp_bridge.dev, AMD_MODECNTL2, 0x00);

	/* Write the enable register */
	enable_reg = INREG16(amd_irongate_private.registers, AMD_GARTENABLE);
	enable_reg = (enable_reg | 0x0004);
	OUTREG16(amd_irongate_private.registers, AMD_GARTENABLE, enable_reg);

	/* Write out the size register */
	pci_read_config_dword(agp_bridge.dev, AMD_APSIZE, &temp);
	temp = (((temp & ~(0x0000000e)) | current_size->size_value)
		| 0x00000001);
	pci_write_config_dword(agp_bridge.dev, AMD_APSIZE, temp);

	/* Flush the tlb */
	OUTREG32(amd_irongate_private.registers, AMD_TLBFLUSH, 0x00000001);

	return 0;
}
/* Undo amd_irongate_configure(): clear the GART enable bit, restore
 * the previous aperture size, and unmap the control registers.
 */
static void amd_irongate_cleanup(void)
{
	aper_size_info_lvl2 *previous_size;
	u32 temp;
	u16 enable_reg;

	previous_size = A_SIZE_LVL2(agp_bridge.previous_size);

	enable_reg = INREG16(amd_irongate_private.registers, AMD_GARTENABLE);
	enable_reg = (enable_reg & ~(0x0004));
	OUTREG16(amd_irongate_private.registers, AMD_GARTENABLE, enable_reg);

	/* Write back the previous size and disable gart translation */
	pci_read_config_dword(agp_bridge.dev, AMD_APSIZE, &temp);
	temp = ((temp & ~(0x0000000f)) | previous_size->size_value);
	pci_write_config_dword(agp_bridge.dev, AMD_APSIZE, temp);
	iounmap((void *) amd_irongate_private.registers);
}
/*
 * This routine could be implemented by taking the addresses
 * written to the GATT, and flushing them individually.  However
 * currently it just flushes the whole table.  Which is probably
 * more efficient, since agp_memory blocks can be a large number of
 * entries.
 */
static void amd_irongate_tlbflush(agp_memory * temp)
{
	OUTREG32(amd_irongate_private.registers, AMD_TLBFLUSH, 0x00000001);
}
/* Apply the chipset mask to a physical address before it is written
 * into the GATT.  Only type 0 is supported by the irongate.
 */
static unsigned long amd_irongate_mask_memory(unsigned long addr, int type)
{
	return addr | agp_bridge.masks[0].mask;
}
/* Map mem->page_count pages into the aperture starting at pg_start,
 * via the two-level GATT.  Only type 0 memory is supported.  Returns
 * -EINVAL on bad type or range, -EBUSY if any target slot is already
 * occupied, 0 on success (after a TLB flush).
 */
static int amd_insert_memory(agp_memory * mem,
			     off_t pg_start, int type)
{
	int i, j, num_entries;
	unsigned long *cur_gatt;
	unsigned long addr;

	num_entries = A_SIZE_LVL2(agp_bridge.current_size)->num_entries;

	if (type != 0 || mem->type != 0) {
		return -EINVAL;
	}
	if ((pg_start + mem->page_count) > num_entries) {
		return -EINVAL;
	}

	/* first pass: verify every destination slot is free */
	j = pg_start;
	while (j < (pg_start + mem->page_count)) {
		addr = (j * PAGE_SIZE) + agp_bridge.gart_bus_addr;
		cur_gatt = GET_GATT(addr);
		if (!PGE_EMPTY(cur_gatt[GET_GATT_OFF(addr)])) {
			return -EBUSY;
		}
		j++;
	}

	if (mem->is_flushed == FALSE) {
		CACHE_FLUSH();
		mem->is_flushed = TRUE;
	}

	/* second pass: write the (pre-masked) entries */
	for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
		addr = (j * PAGE_SIZE) + agp_bridge.gart_bus_addr;
		cur_gatt = GET_GATT(addr);
		cur_gatt[GET_GATT_OFF(addr)] = mem->memory[i];
	}
	agp_bridge.tlb_flush(mem);
	return 0;
}
/* Unmap mem->page_count pages starting at pg_start, pointing the
 * freed slots back at the scratch page, then flush the TLB.  Only
 * type 0 is supported.
 */
static int amd_remove_memory(agp_memory * mem, off_t pg_start,
			     int type)
{
	int i;
	unsigned long *cur_gatt;
	unsigned long addr;

	if (type != 0 || mem->type != 0) {
		return -EINVAL;
	}
	for (i = pg_start; i < (mem->page_count + pg_start); i++) {
		addr = (i * PAGE_SIZE) + agp_bridge.gart_bus_addr;
		cur_gatt = GET_GATT(addr);
		cur_gatt[GET_GATT_OFF(addr)] =
		    (unsigned long) agp_bridge.scratch_page;
	}

	agp_bridge.tlb_flush(mem);
	return 0;
}
/* Aperture size table: {MB, GATT entries, APSIZE size field} */
static aper_size_info_lvl2 amd_irongate_sizes[7] =
{
	{2048, 524288, 0x0000000c},
	{1024, 262144, 0x0000000a},
	{512, 131072, 0x00000008},
	{256, 65536, 0x00000006},
	{128, 32768, 0x00000004},
	{64, 16384, 0x00000002},
	{32, 8192, 0x00000000}
};
/* GATT entry mask: bit 0 is the Irongate valid bit */
static gatt_mask amd_irongate_masks[] =
{
	{0x00000001, 0}
};
1827 static int __init amd_irongate_setup (struct pci_dev *pdev)
1829 agp_bridge.masks = amd_irongate_masks;
1830 agp_bridge.num_of_masks = 1;
1831 agp_bridge.aperture_sizes = (void *) amd_irongate_sizes;
1832 agp_bridge.size_type = LVL2_APER_SIZE;
1833 agp_bridge.num_aperture_sizes = 7;
1834 agp_bridge.dev_private_data = (void *) &amd_irongate_private;
1835 agp_bridge.needs_scratch_page = FALSE;
1836 agp_bridge.configure = amd_irongate_configure;
1837 agp_bridge.fetch_size = amd_irongate_fetch_size;
1838 agp_bridge.cleanup = amd_irongate_cleanup;
1839 agp_bridge.tlb_flush = amd_irongate_tlbflush;
1840 agp_bridge.mask_memory = amd_irongate_mask_memory;
1841 agp_bridge.agp_enable = agp_generic_agp_enable;
1842 agp_bridge.cache_flush = global_cache_flush;
1843 agp_bridge.create_gatt_table = amd_create_gatt_table;
1844 agp_bridge.free_gatt_table = amd_free_gatt_table;
1845 agp_bridge.insert_memory = amd_insert_memory;
1846 agp_bridge.remove_memory = amd_remove_memory;
1847 agp_bridge.alloc_by_type = agp_generic_alloc_by_type;
1848 agp_bridge.free_by_type = agp_generic_free_by_type;
1850 return 0;
1852 (void) pdev; /* unused */
1855 #endif /* CONFIG_AGP_AMD */
1857 #ifdef CONFIG_AGP_ALI
/* Determine the current ALi aperture size from the low nibble of
 * ATTBASE.  Returns the size in megabytes, or 0 if unrecognized.
 */
static int ali_fetch_size(void)
{
	int i;
	u32 temp;
	aper_size_info_32 *values;

	pci_read_config_dword(agp_bridge.dev, ALI_ATTBASE, &temp);
	temp &= ~(0xfffffff0);	/* keep only the size field */
	values = A_SIZE_32(agp_bridge.aperture_sizes);

	for (i = 0; i < agp_bridge.num_aperture_sizes; i++) {
		if (temp == values[i].size_value) {
			agp_bridge.previous_size =
			    agp_bridge.current_size = (void *) (values + i);
			agp_bridge.aperture_size_idx = i;
			return values[i].size;
		}
	}

	return 0;
}
/* Flush the whole ALi GART TLB by pulsing the low control byte of
 * TLBCTRL while preserving the upper bits.  mem is unused.
 */
static void ali_tlbflush(agp_memory * mem)
{
	u32 temp;

	pci_read_config_dword(agp_bridge.dev, ALI_TLBCTRL, &temp);
	pci_write_config_dword(agp_bridge.dev, ALI_TLBCTRL,
			       ((temp & 0xffffff00) | 0x00000090));
	pci_write_config_dword(agp_bridge.dev, ALI_TLBCTRL,
			       ((temp & 0xffffff00) | 0x00000010));
}
/* Undo ali_configure(): flush/disable the TLB and restore the
 * pre-load ATTBASE size value.
 */
static void ali_cleanup(void)
{
	aper_size_info_32 *previous_size;
	u32 temp;

	previous_size = A_SIZE_32(agp_bridge.previous_size);

	pci_read_config_dword(agp_bridge.dev, ALI_TLBCTRL, &temp);
	pci_write_config_dword(agp_bridge.dev, ALI_TLBCTRL,
			       ((temp & 0xffffff00) | 0x00000090));
	pci_write_config_dword(agp_bridge.dev, ALI_ATTBASE,
			       previous_size->size_value);
}
/* Program the ALi host bridge: ATTBASE carries both the GATT base
 * and the size field; then enable the TLB and record the aperture
 * bus address.  Always returns 0.
 */
static int ali_configure(void)
{
	u32 temp;
	aper_size_info_32 *current_size;

	current_size = A_SIZE_32(agp_bridge.current_size);

	/* aperture size and gatt addr */
	pci_write_config_dword(agp_bridge.dev, ALI_ATTBASE,
		    agp_bridge.gatt_bus_addr | current_size->size_value);

	/* tlb control */
	pci_read_config_dword(agp_bridge.dev, ALI_TLBCTRL, &temp);
	pci_write_config_dword(agp_bridge.dev, ALI_TLBCTRL,
			       ((temp & 0xffffff00) | 0x00000010));

	/* address to map to */
	pci_read_config_dword(agp_bridge.dev, ALI_APBASE, &temp);
	agp_bridge.gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
	return 0;
}
/* Apply the chipset mask to a physical address before it is written
 * into the GATT.  Memory type is ignored.
 */
static unsigned long ali_mask_memory(unsigned long addr, int type)
{
	return addr | agp_bridge.masks[0].mask;
}
/* Setup function */

/* GATT entry mask: ALi sets no extra bits in table entries */
static gatt_mask ali_generic_masks[] =
{
	{0x00000000, 0}
};
/* Aperture size table: {MB, GATT entries, page order, ATTBASE size
 * field} */
static aper_size_info_32 ali_generic_sizes[7] =
{
	{256, 65536, 6, 10},
	{128, 32768, 5, 9},
	{64, 16384, 4, 8},
	{32, 8192, 3, 7},
	{16, 4096, 2, 6},
	{8, 2048, 1, 4},
	{4, 1024, 0, 3}
};
1953 static int __init ali_generic_setup (struct pci_dev *pdev)
1955 agp_bridge.masks = ali_generic_masks;
1956 agp_bridge.num_of_masks = 1;
1957 agp_bridge.aperture_sizes = (void *) ali_generic_sizes;
1958 agp_bridge.size_type = U32_APER_SIZE;
1959 agp_bridge.num_aperture_sizes = 7;
1960 agp_bridge.dev_private_data = NULL;
1961 agp_bridge.needs_scratch_page = FALSE;
1962 agp_bridge.configure = ali_configure;
1963 agp_bridge.fetch_size = ali_fetch_size;
1964 agp_bridge.cleanup = ali_cleanup;
1965 agp_bridge.tlb_flush = ali_tlbflush;
1966 agp_bridge.mask_memory = ali_mask_memory;
1967 agp_bridge.agp_enable = agp_generic_agp_enable;
1968 agp_bridge.cache_flush = global_cache_flush;
1969 agp_bridge.create_gatt_table = agp_generic_create_gatt_table;
1970 agp_bridge.free_gatt_table = agp_generic_free_gatt_table;
1971 agp_bridge.insert_memory = agp_generic_insert_memory;
1972 agp_bridge.remove_memory = agp_generic_remove_memory;
1973 agp_bridge.alloc_by_type = agp_generic_alloc_by_type;
1974 agp_bridge.free_by_type = agp_generic_free_by_type;
1976 return 0;
1978 (void) pdev; /* unused */
1981 #endif /* CONFIG_AGP_ALI */
/* per-chipset initialization data.
 * note -- all chipsets for a single vendor MUST be grouped together
 */
static struct {
	unsigned short device_id; /* first, to make table easier to read */
	unsigned short vendor_id;
	enum chipset_type chipset;
	const char *vendor_name;
	const char *chipset_name;
	int (*chipset_setup) (struct pci_dev *pdev);
} agp_bridge_info[] __initdata = {

#ifdef CONFIG_AGP_ALI
	{ PCI_DEVICE_ID_AL_M1541_0,
		PCI_VENDOR_ID_AL,
		ALI_M1541,
		"Ali",
		"M1541",
		ali_generic_setup },
	{ 0,
		PCI_VENDOR_ID_AL,
		ALI_GENERIC,
		"Ali",
		"Generic",
		ali_generic_setup },
#endif /* CONFIG_AGP_ALI */

#ifdef CONFIG_AGP_AMD
	{ PCI_DEVICE_ID_AMD_IRONGATE_0,
		PCI_VENDOR_ID_AMD,
		AMD_IRONGATE,
		"AMD",
		"Irongate",
		amd_irongate_setup },
	{ 0,
		PCI_VENDOR_ID_AMD,
		AMD_GENERIC,
		"AMD",
		"Generic",
		amd_irongate_setup },
#endif /* CONFIG_AGP_AMD */

#ifdef CONFIG_AGP_INTEL
	{ PCI_DEVICE_ID_INTEL_82443LX_0,
		PCI_VENDOR_ID_INTEL,
		INTEL_LX,
		"Intel",
		"440LX",
		intel_generic_setup },
	{ PCI_DEVICE_ID_INTEL_82443BX_0,
		PCI_VENDOR_ID_INTEL,
		INTEL_BX,
		"Intel",
		"440BX",
		intel_generic_setup },
	{ PCI_DEVICE_ID_INTEL_82443GX_0,
		PCI_VENDOR_ID_INTEL,
		INTEL_GX,
		"Intel",
		"440GX",
		intel_generic_setup },
	{ PCI_DEVICE_ID_INTEL_840_0,
		PCI_VENDOR_ID_INTEL,
		INTEL_I840,
		"Intel",
		"i840",
		intel_840_setup },
	{ 0,
		PCI_VENDOR_ID_INTEL,
		INTEL_GENERIC,
		"Intel",
		"Generic",
		intel_generic_setup },
#endif /* CONFIG_AGP_INTEL */

#ifdef CONFIG_AGP_SIS
	/* NOTE(review): the 630/540/620/530 device ids each appear
	 * twice below; lookup stops at the first match, so the second
	 * ("Generic") copies are never reached. */
	{ PCI_DEVICE_ID_SI_630,
		PCI_VENDOR_ID_SI,
		SIS_GENERIC,
		"SiS",
		"630",
		sis_generic_setup },
	{ PCI_DEVICE_ID_SI_540,
		PCI_VENDOR_ID_SI,
		SIS_GENERIC,
		"SiS",
		"540",
		sis_generic_setup },
	{ PCI_DEVICE_ID_SI_620,
		PCI_VENDOR_ID_SI,
		SIS_GENERIC,
		"SiS",
		"620",
		sis_generic_setup },
	{ PCI_DEVICE_ID_SI_530,
		PCI_VENDOR_ID_SI,
		SIS_GENERIC,
		"SiS",
		"530",
		sis_generic_setup },
	{ PCI_DEVICE_ID_SI_630,
		PCI_VENDOR_ID_SI,
		SIS_GENERIC,
		"SiS",
		"Generic",
		sis_generic_setup },
	{ PCI_DEVICE_ID_SI_540,
		PCI_VENDOR_ID_SI,
		SIS_GENERIC,
		"SiS",
		"Generic",
		sis_generic_setup },
	{ PCI_DEVICE_ID_SI_620,
		PCI_VENDOR_ID_SI,
		SIS_GENERIC,
		"SiS",
		"Generic",
		sis_generic_setup },
	{ PCI_DEVICE_ID_SI_530,
		PCI_VENDOR_ID_SI,
		SIS_GENERIC,
		"SiS",
		"Generic",
		sis_generic_setup },
	{ 0,
		PCI_VENDOR_ID_SI,
		SIS_GENERIC,
		"SiS",
		"Generic",
		sis_generic_setup },
#endif /* CONFIG_AGP_SIS */

#ifdef CONFIG_AGP_VIA
	{ PCI_DEVICE_ID_VIA_8371_0,
		PCI_VENDOR_ID_VIA,
		VIA_APOLLO_SUPER,
		"Via",
		"Apollo Super",
		via_generic_setup },
	{ PCI_DEVICE_ID_VIA_8501_0,
		PCI_VENDOR_ID_VIA,
		VIA_MVP4,
		"Via",
		"MVP4",
		via_generic_setup },
	{ PCI_DEVICE_ID_VIA_82C597_0,
		PCI_VENDOR_ID_VIA,
		VIA_VP3,
		"Via",
		"VP3",
		via_generic_setup },
	{ PCI_DEVICE_ID_VIA_82C598_0,
		PCI_VENDOR_ID_VIA,
		VIA_MVP3,
		"Via",
		"MVP3",
		via_generic_setup },
	{ PCI_DEVICE_ID_VIA_82C691_0,
		PCI_VENDOR_ID_VIA,
		VIA_APOLLO_PRO,
		"Via",
		"Apollo Pro",
		via_generic_setup },
	{ 0,
		PCI_VENDOR_ID_VIA,
		VIA_GENERIC,
		"Via",
		"Generic",
		via_generic_setup },
#endif /* CONFIG_AGP_VIA */

	{ 0, }, /* dummy final entry, always present */
};
2159 /* scan table above for supported devices */
/* scan table above for supported devices */
/* Find pdev's vendor group in agp_bridge_info, then an exact device
 * match within it and dispatch to its setup routine.  If no exact
 * match and agp_try_unsupported is set, fall back to the vendor's
 * generic entry (device_id == 0, last in each group).  Returns the
 * setup routine's result or -ENODEV.
 */
static int __init agp_lookup_host_bridge (struct pci_dev *pdev)
{
	int i;

	for (i = 0; i < arraysize (agp_bridge_info); i++)
		if (pdev->vendor == agp_bridge_info[i].vendor_id)
			break;

	if (i >= arraysize (agp_bridge_info)) {
		printk (KERN_DEBUG PFX "unsupported bridge\n");
		return -ENODEV;
	}

	while ((i < arraysize (agp_bridge_info)) &&
	       (agp_bridge_info[i].vendor_id == pdev->vendor)) {
		if (pdev->device == agp_bridge_info[i].device_id) {
			printk (KERN_INFO PFX "Detected %s %s chipset\n",
				agp_bridge_info[i].vendor_name,
				agp_bridge_info[i].chipset_name);
			agp_bridge.type = agp_bridge_info[i].chipset;
			return agp_bridge_info[i].chipset_setup (pdev);
		}
		i++;
	}

	i--; /* point to vendor generic entry (device_id == 0) */

	/* try init anyway, if user requests it AND
	 * there is a 'generic' bridge entry for this vendor */
	if (agp_try_unsupported && agp_bridge_info[i].device_id == 0) {
		printk(KERN_WARNING PFX "Trying generic %s routines"
		       " for device id: %04x\n",
		       agp_bridge_info[i].vendor_name, pdev->device);
		agp_bridge.type = agp_bridge_info[i].chipset;
		return agp_bridge_info[i].chipset_setup (pdev);
	}

	printk(KERN_ERR PFX "Unsupported %s chipset (device id: %04x),"
	       " you might want to try agp_try_unsupported=1.\n",
	       agp_bridge_info[i].vendor_name, pdev->device);
	return -ENODEV;
}
2205 /* Supported Device Scanning routine */
/* Supported Device Scanning routine */

/* Locate the first PCI host bridge and identify it: special-case the
 * Intel i810 family (handled via the secondary graphics device),
 * otherwise walk the PCI capability list for the AGP capability
 * (id 0x02), record capndx and the mode register, and dispatch to
 * agp_lookup_host_bridge().  Returns 0 or a negative errno.
 */
static int __init agp_find_supported_device(void)
{
	struct pci_dev *dev = NULL;
	u8 cap_ptr = 0x00;
	u32 cap_id, scratch;

	if ((dev = pci_find_class(PCI_CLASS_BRIDGE_HOST << 8, NULL)) == NULL)
		return -ENODEV;

	agp_bridge.dev = dev;

	/* Need to test for I810 here */
#ifdef CONFIG_AGP_I810
	if (dev->vendor == PCI_VENDOR_ID_INTEL) {
		struct pci_dev *i810_dev;

		switch (dev->device) {
		case PCI_DEVICE_ID_INTEL_810_0:
			i810_dev = pci_find_device(PCI_VENDOR_ID_INTEL,
						   PCI_DEVICE_ID_INTEL_810_1,
						   NULL);
			if (i810_dev == NULL) {
				printk(KERN_ERR PFX "Detected an Intel i810,"
				       " but could not find the secondary"
				       " device.\n");
				return -ENODEV;
			}
			printk(KERN_INFO PFX "Detected an Intel "
			       "i810 Chipset.\n");
			agp_bridge.type = INTEL_I810;
			return intel_i810_setup (i810_dev);

		case PCI_DEVICE_ID_INTEL_810_DC100_0:
			i810_dev = pci_find_device(PCI_VENDOR_ID_INTEL,
					 PCI_DEVICE_ID_INTEL_810_DC100_1,
						   NULL);
			if (i810_dev == NULL) {
				printk(KERN_ERR PFX "Detected an Intel i810 "
				       "DC100, but could not find the "
				       "secondary device.\n");
				return -ENODEV;
			}
			printk(KERN_INFO PFX "Detected an Intel i810 "
			       "DC100 Chipset.\n");
			agp_bridge.type = INTEL_I810;
			return intel_i810_setup(i810_dev);

		case PCI_DEVICE_ID_INTEL_810_E_0:
			i810_dev = pci_find_device(PCI_VENDOR_ID_INTEL,
					     PCI_DEVICE_ID_INTEL_810_E_1,
						   NULL);
			if (i810_dev == NULL) {
				printk(KERN_ERR PFX "Detected an Intel i810 E"
				       ", but could not find the secondary "
				       "device.\n");
				return -ENODEV;
			}
			printk(KERN_INFO PFX "Detected an Intel i810 E "
			       "Chipset.\n");
			agp_bridge.type = INTEL_I810;
			return intel_i810_setup(i810_dev);

		default:
			break;
		}
	}
#endif /* CONFIG_AGP_I810 */

	/* find capndx: status bit 20 = capabilities list supported */
	pci_read_config_dword(dev, 0x04, &scratch);
	if (!(scratch & 0x00100000))
		return -ENODEV;

	pci_read_config_byte(dev, 0x34, &cap_ptr);
	if (cap_ptr != 0x00) {
		do {
			pci_read_config_dword(dev, cap_ptr, &cap_id);

			if ((cap_id & 0xff) != 0x02)
				cap_ptr = (cap_id >> 8) & 0xff;
		}
		while (((cap_id & 0xff) != 0x02) && (cap_ptr != 0x00));
	}
	if (cap_ptr == 0x00)
		return -ENODEV;
	agp_bridge.capndx = cap_ptr;

	/* Fill in the mode register */
	pci_read_config_dword(agp_bridge.dev,
			      agp_bridge.capndx + 4,
			      &agp_bridge.mode);

	/* probe for known chipsets */
	return agp_lookup_host_bridge (dev);
}
/* Row of the RAM-size -> max-AGP-memory interpolation table used by
 * agp_find_max(); both fields are in megabytes.
 */
struct agp_max_table {
	int mem;
	int agp;
};

static struct agp_max_table maxes_table[9] __initdata =
{
	{0, 0},
	{32, 4},
	{64, 28},
	{128, 96},
	{256, 204},
	{512, 440},
	{1024, 942},
	{2048, 1920},
	{4096, 3932}
};
/* Estimate how much main memory may be dedicated to AGP, by linear
 * interpolation between the bracketing rows of maxes_table.
 * Returns the limit in pages (result is computed in MB, then shifted
 * to pages).
 */
static int __init agp_find_max (void)
{
	long memory, index, result;

	memory = virt_to_phys(high_memory) >> 20;	/* total RAM in MB */
	index = 1;

	while ((memory > maxes_table[index].mem) &&
	       (index < 8)) {
		index++;
	}

	/* interpolate between rows index-1 and index */
	result = maxes_table[index - 1].agp +
	   ( (memory - maxes_table[index - 1].mem)  *
	     (maxes_table[index].agp - maxes_table[index - 1].agp)) /
	   (maxes_table[index].mem - maxes_table[index - 1].mem);

	printk(KERN_INFO PFX "Maximum main memory to use "
	       "for agp memory: %ldM\n", result);
	result = result << (20 - PAGE_SHIFT);	/* MB -> pages */
	return result;
}
#define AGPGART_VERSION_MAJOR 0
#define AGPGART_VERSION_MINOR 99

/* version reported to the frontend */
static agp_version agp_current_version =
{
	AGPGART_VERSION_MAJOR,
	AGPGART_VERSION_MINOR
};
2353 static int __init agp_backend_initialize(void)
2355 int size_value, rc, got_gatt=0, got_keylist=0;
2357 memset(&agp_bridge, 0, sizeof(struct agp_bridge_data));
2358 agp_bridge.type = NOT_SUPPORTED;
2359 agp_bridge.max_memory_agp = agp_find_max();
2360 agp_bridge.version = &agp_current_version;
2362 rc = agp_find_supported_device();
2363 if (rc) {
2364 /* not KERN_ERR because error msg should have already printed */
2365 printk(KERN_DEBUG PFX "no supported devices found.\n");
2366 return rc;
2369 if (agp_bridge.needs_scratch_page == TRUE) {
2370 agp_bridge.scratch_page = agp_alloc_page();
2372 if (agp_bridge.scratch_page == 0) {
2373 printk(KERN_ERR PFX "unable to get memory for "
2374 "scratch page.\n");
2375 return -ENOMEM;
2377 agp_bridge.scratch_page =
2378 virt_to_phys((void *) agp_bridge.scratch_page);
2379 agp_bridge.scratch_page =
2380 agp_bridge.mask_memory(agp_bridge.scratch_page, 0);
2383 size_value = agp_bridge.fetch_size();
2385 if (size_value == 0) {
2386 printk(KERN_ERR PFX "unable to detrimine aperture size.\n");
2387 rc = -EINVAL;
2388 goto err_out;
2390 if (agp_bridge.create_gatt_table()) {
2391 printk(KERN_ERR PFX "unable to get memory for graphics "
2392 "translation table.\n");
2393 rc = -ENOMEM;
2394 goto err_out;
2396 got_gatt = 1;
2398 agp_bridge.key_list = vmalloc(PAGE_SIZE * 4);
2399 if (agp_bridge.key_list == NULL) {
2400 printk(KERN_ERR PFX "error allocating memory for key lists.\n");
2401 rc = -ENOMEM;
2402 goto err_out;
2404 got_keylist = 1;
2406 /* FIXME vmalloc'd memory not guaranteed contiguous */
2407 memset(agp_bridge.key_list, 0, PAGE_SIZE * 4);
2409 if (agp_bridge.configure()) {
2410 printk(KERN_ERR PFX "error configuring host chipset.\n");
2411 rc = -EINVAL;
2412 goto err_out;
2415 printk(KERN_INFO PFX "AGP aperture is %dM @ 0x%lx\n",
2416 size_value, agp_bridge.gart_bus_addr);
2418 return 0;
2420 err_out:
2421 if (agp_bridge.needs_scratch_page == TRUE) {
2422 agp_bridge.scratch_page &= ~(0x00000fff);
2423 agp_destroy_page((unsigned long)
2424 phys_to_virt(agp_bridge.scratch_page));
2426 if (got_gatt)
2427 agp_bridge.free_gatt_table();
2428 if (got_keylist)
2429 vfree(agp_bridge.key_list);
2430 return rc;
/* cannot be __exit b/c as it could be called from __init code */
/* Tear down everything agp_backend_initialize() set up: chipset,
 * GATT, key lists and (if used) the scratch page.
 */
static void agp_backend_cleanup(void)
{
	agp_bridge.cleanup();
	agp_bridge.free_gatt_table();
	vfree(agp_bridge.key_list);

	if (agp_bridge.needs_scratch_page == TRUE) {
		/* strip the mask bits back off before freeing */
		agp_bridge.scratch_page &= ~(0x00000fff);
		agp_destroy_page((unsigned long)
				 phys_to_virt(agp_bridge.scratch_page));
	}
}
2448 extern int agp_frontend_initialize(void);
2449 extern void agp_frontend_cleanup(void);
/* Module entry point: bring up the backend, then the frontend;
 * unwind the backend if the frontend fails.
 */
static int __init agp_init(void)
{
	int ret_val;

	printk(KERN_INFO "Linux agpgart interface v%d.%d (c) Jeff Hartmann\n",
	       AGPGART_VERSION_MAJOR, AGPGART_VERSION_MINOR);

	ret_val = agp_backend_initialize();
	if (ret_val)
		return ret_val;

	ret_val = agp_frontend_initialize();
	if (ret_val) {
		agp_backend_cleanup();
		return ret_val;
	}

	return 0;
}
/* Module exit: frontend first, then backend (reverse of agp_init) */
static void __exit agp_cleanup(void)
{
	agp_frontend_cleanup();
	agp_backend_cleanup();
}
2477 module_init(agp_init);
2478 module_exit(agp_cleanup);