/*
 * drivers/char/agp/agpgart_be.c — AGPGART backend (merged with 2.3.48).
 */
1 /*
2 * AGPGART module version 0.99
3 * Copyright (C) 1999 Jeff Hartmann
4 * Copyright (C) 1999 Precision Insight, Inc.
5 * Copyright (C) 1999 Xi Graphics, Inc.
7 * Permission is hereby granted, free of charge, to any person obtaining a
8 * copy of this software and associated documentation files (the "Software"),
9 * to deal in the Software without restriction, including without limitation
10 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
11 * and/or sell copies of the Software, and to permit persons to whom the
12 * Software is furnished to do so, subject to the following conditions:
14 * The above copyright notice and this permission notice shall be included
15 * in all copies or substantial portions of the Software.
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
18 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * JEFF HARTMANN, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM,
21 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
22 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
23 * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26 #include <linux/config.h>
27 #include <linux/version.h>
28 #include <linux/module.h>
29 #include <linux/types.h>
30 #include <linux/kernel.h>
31 #include <linux/sched.h>
32 #include <linux/mm.h>
33 #include <linux/string.h>
34 #include <linux/errno.h>
35 #include <linux/malloc.h>
36 #include <linux/vmalloc.h>
37 #include <linux/pci.h>
38 #include <linux/init.h>
39 #include <linux/pagemap.h>
40 #include <linux/miscdevice.h>
41 #include <asm/system.h>
42 #include <asm/uaccess.h>
43 #include <asm/io.h>
44 #include <asm/page.h>
46 #include <linux/agp_backend.h>
47 #include "agp.h"
49 MODULE_AUTHOR("Jeff Hartmann <jhartmann@precisioninsight.com>");
50 MODULE_PARM(agp_try_unsupported, "1i");
51 EXPORT_SYMBOL(agp_free_memory);
52 EXPORT_SYMBOL(agp_allocate_memory);
53 EXPORT_SYMBOL(agp_copy_info);
54 EXPORT_SYMBOL(agp_bind_memory);
55 EXPORT_SYMBOL(agp_unbind_memory);
56 EXPORT_SYMBOL(agp_enable);
57 EXPORT_SYMBOL(agp_backend_acquire);
58 EXPORT_SYMBOL(agp_backend_release);
60 static void flush_cache(void);
62 static struct agp_bridge_data agp_bridge;
63 static int agp_try_unsupported __initdata = 0;
66 static inline void flush_cache(void)
68 #if defined(__i386__)
69 asm volatile ("wbinvd":::"memory");
70 #elif defined(__alpha__)
71 /* ??? I wonder if we'll really need to flush caches, or if the
72 core logic can manage to keep the system coherent. The ARM
73 speaks only of using `cflush' to get things in memory in
74 preparation for power failure.
76 If we do need to call `cflush', we'll need a target page,
77 as we can only flush one page at a time. */
78 mb();
79 #else
80 #error "Please define flush_cache."
81 #endif
84 #ifdef __SMP__
85 static atomic_t cpus_waiting;
87 static void ipi_handler(void *null)
89 flush_cache();
90 atomic_dec(&cpus_waiting);
91 while (atomic_read(&cpus_waiting) > 0)
92 barrier();
95 static void smp_flush_cache(void)
97 atomic_set(&cpus_waiting, smp_num_cpus - 1);
98 if (smp_call_function(ipi_handler, NULL, 1, 0) != 0)
99 panic(PFX "timed out waiting for the other CPUs!\n");
100 flush_cache();
101 while (atomic_read(&cpus_waiting) > 0)
102 barrier();
104 #define global_cache_flush smp_flush_cache
105 #else /* __SMP__ */
106 #define global_cache_flush flush_cache
107 #endif /* __SMP__ */
109 int agp_backend_acquire(void)
111 atomic_inc(&agp_bridge.agp_in_use);
113 if (atomic_read(&agp_bridge.agp_in_use) != 1) {
114 atomic_dec(&agp_bridge.agp_in_use);
115 return -EBUSY;
117 MOD_INC_USE_COUNT;
118 return 0;
121 void agp_backend_release(void)
123 atomic_dec(&agp_bridge.agp_in_use);
124 MOD_DEC_USE_COUNT;
128 * Basic Page Allocation Routines -
129 * These routines handle page allocation
130 * and by default they reserve the allocated
131 * memory. They also handle incrementing the
132 * current_memory_agp value, Which is checked
133 * against a maximum value.
136 static unsigned long agp_alloc_page(void)
138 void *pt;
140 pt = (void *) __get_free_page(GFP_KERNEL);
141 if (pt == NULL) {
142 return 0;
144 atomic_inc(&mem_map[MAP_NR(pt)].count);
145 set_bit(PG_locked, &mem_map[MAP_NR(pt)].flags);
146 atomic_inc(&agp_bridge.current_memory_agp);
147 return (unsigned long) pt;
150 static void agp_destroy_page(unsigned long page)
152 void *pt = (void *) page;
154 if (pt == NULL) {
155 return;
157 atomic_dec(&mem_map[MAP_NR(pt)].count);
158 clear_bit(PG_locked, &mem_map[MAP_NR(pt)].flags);
159 wake_up(&mem_map[MAP_NR(pt)].wait);
160 free_page((unsigned long) pt);
161 atomic_dec(&agp_bridge.current_memory_agp);
164 /* End Basic Page Allocation Routines */
167 * Generic routines for handling agp_memory structures -
168 * They use the basic page allocation routines to do the
169 * brunt of the work.
173 static void agp_free_key(int key)
176 if (key < 0) {
177 return;
179 if (key < MAXKEY) {
180 clear_bit(key, agp_bridge.key_list);
184 static int agp_get_key(void)
186 int bit;
188 bit = find_first_zero_bit(agp_bridge.key_list, MAXKEY);
189 if (bit < MAXKEY) {
190 set_bit(bit, agp_bridge.key_list);
191 return bit;
193 return -1;
196 static agp_memory *agp_create_memory(int scratch_pages)
198 agp_memory *new;
200 new = kmalloc(sizeof(agp_memory), GFP_KERNEL);
202 if (new == NULL) {
203 return NULL;
205 memset(new, 0, sizeof(agp_memory));
206 new->key = agp_get_key();
208 if (new->key < 0) {
209 kfree(new);
210 return NULL;
212 new->memory = vmalloc(PAGE_SIZE * scratch_pages);
214 if (new->memory == NULL) {
215 agp_free_key(new->key);
216 kfree(new);
217 return NULL;
219 new->num_scratch_pages = scratch_pages;
220 return new;
223 void agp_free_memory(agp_memory * curr)
225 int i;
227 if (curr == NULL) {
228 return;
230 if (curr->is_bound == TRUE) {
231 agp_unbind_memory(curr);
233 if (curr->type != 0) {
234 agp_bridge.free_by_type(curr);
235 return;
237 if (curr->page_count != 0) {
238 for (i = 0; i < curr->page_count; i++) {
239 curr->memory[i] &= ~(0x00000fff);
240 agp_destroy_page((unsigned long)
241 phys_to_virt(curr->memory[i]));
244 agp_free_key(curr->key);
245 vfree(curr->memory);
246 kfree(curr);
247 MOD_DEC_USE_COUNT;
250 #define ENTRIES_PER_PAGE (PAGE_SIZE / sizeof(unsigned long))
252 agp_memory *agp_allocate_memory(size_t page_count, u32 type)
254 int scratch_pages;
255 agp_memory *new;
256 int i;
258 if ((atomic_read(&agp_bridge.current_memory_agp) + page_count) >
259 agp_bridge.max_memory_agp) {
260 return NULL;
263 if (type != 0) {
264 new = agp_bridge.alloc_by_type(page_count, type);
265 return new;
267 /* We always increase the module count, since free auto-decrements
268 * it
271 MOD_INC_USE_COUNT;
273 scratch_pages = (page_count + ENTRIES_PER_PAGE - 1) / ENTRIES_PER_PAGE;
275 new = agp_create_memory(scratch_pages);
277 if (new == NULL) {
278 MOD_DEC_USE_COUNT;
279 return NULL;
281 for (i = 0; i < page_count; i++) {
282 new->memory[i] = agp_alloc_page();
284 if (new->memory[i] == 0) {
285 /* Free this structure */
286 agp_free_memory(new);
287 return NULL;
289 new->memory[i] =
290 agp_bridge.mask_memory(
291 virt_to_phys((void *) new->memory[i]),
292 type);
293 new->page_count++;
296 return new;
299 /* End - Generic routines for handling agp_memory structures */
301 static int agp_return_size(void)
303 int current_size;
304 void *temp;
306 temp = agp_bridge.current_size;
308 switch (agp_bridge.size_type) {
309 case U8_APER_SIZE:
310 current_size = A_SIZE_8(temp)->size;
311 break;
312 case U16_APER_SIZE:
313 current_size = A_SIZE_16(temp)->size;
314 break;
315 case U32_APER_SIZE:
316 current_size = A_SIZE_32(temp)->size;
317 break;
318 case FIXED_APER_SIZE:
319 current_size = A_SIZE_FIX(temp)->size;
320 break;
321 default:
322 current_size = 0;
323 break;
326 return current_size;
329 /* Routine to copy over information structure */
331 void agp_copy_info(agp_kern_info * info)
333 memset(info, 0, sizeof(agp_kern_info));
334 info->version.major = agp_bridge.version->major;
335 info->version.minor = agp_bridge.version->minor;
336 info->device = agp_bridge.dev;
337 info->chipset = agp_bridge.type;
338 info->mode = agp_bridge.mode;
339 info->aper_base = agp_bridge.gart_bus_addr;
340 info->aper_size = agp_return_size();
341 info->max_memory = agp_bridge.max_memory_agp;
342 info->current_memory = atomic_read(&agp_bridge.current_memory_agp);
345 /* End - Routine to copy over information structure */
348 * Routines for handling swapping of agp_memory into the GATT -
349 * These routines take agp_memory and insert them into the GATT.
350 * They call device specific routines to actually write to the GATT.
353 int agp_bind_memory(agp_memory * curr, off_t pg_start)
355 int ret_val;
357 if ((curr == NULL) || (curr->is_bound == TRUE)) {
358 return -EINVAL;
360 if (curr->is_flushed == FALSE) {
361 CACHE_FLUSH();
362 curr->is_flushed = TRUE;
364 ret_val = agp_bridge.insert_memory(curr, pg_start, curr->type);
366 if (ret_val != 0) {
367 return ret_val;
369 curr->is_bound = TRUE;
370 curr->pg_start = pg_start;
371 return 0;
374 int agp_unbind_memory(agp_memory * curr)
376 int ret_val;
378 if (curr == NULL) {
379 return -EINVAL;
381 if (curr->is_bound != TRUE) {
382 return -EINVAL;
384 ret_val = agp_bridge.remove_memory(curr, curr->pg_start, curr->type);
386 if (ret_val != 0) {
387 return ret_val;
389 curr->is_bound = FALSE;
390 curr->pg_start = 0;
391 return 0;
394 /* End - Routines for handling swapping of agp_memory into the GATT */
397 * Driver routines - start
398 * Currently this module supports the
399 * i810, 440lx, 440bx, 440gx, via vp3, via mvp3,
400 * amd irongate, ALi M1541 and generic support for the
401 * SiS chipsets.
404 /* Generic Agp routines - Start */
406 static void agp_generic_agp_enable(u32 mode)
408 struct pci_dev *device = NULL;
409 u32 command, scratch, cap_id;
410 u8 cap_ptr;
412 pci_read_config_dword(agp_bridge.dev,
413 agp_bridge.capndx + 4,
414 &command);
417 * PASS1: go throu all devices that claim to be
418 * AGP devices and collect their data.
421 while ((device = pci_find_class(PCI_CLASS_DISPLAY_VGA << 8,
422 device)) != NULL) {
423 pci_read_config_dword(device, 0x04, &scratch);
425 if (!(scratch & 0x00100000))
426 continue;
428 pci_read_config_byte(device, 0x34, &cap_ptr);
430 if (cap_ptr != 0x00) {
431 do {
432 pci_read_config_dword(device,
433 cap_ptr, &cap_id);
435 if ((cap_id & 0xff) != 0x02)
436 cap_ptr = (cap_id >> 8) & 0xff;
438 while (((cap_id & 0xff) != 0x02) && (cap_ptr != 0x00));
440 if (cap_ptr != 0x00) {
442 * Ok, here we have a AGP device. Disable impossible
443 * settings, and adjust the readqueue to the minimum.
446 pci_read_config_dword(device, cap_ptr + 4, &scratch);
448 /* adjust RQ depth */
449 command =
450 ((command & ~0xff000000) |
451 min((mode & 0xff000000),
452 min((command & 0xff000000),
453 (scratch & 0xff000000))));
455 /* disable SBA if it's not supported */
456 if (!((command & 0x00000200) &&
457 (scratch & 0x00000200) &&
458 (mode & 0x00000200)))
459 command &= ~0x00000200;
461 /* disable FW if it's not supported */
462 if (!((command & 0x00000010) &&
463 (scratch & 0x00000010) &&
464 (mode & 0x00000010)))
465 command &= ~0x00000010;
467 if (!((command & 4) &&
468 (scratch & 4) &&
469 (mode & 4)))
470 command &= ~0x00000004;
472 if (!((command & 2) &&
473 (scratch & 2) &&
474 (mode & 2)))
475 command &= ~0x00000002;
477 if (!((command & 1) &&
478 (scratch & 1) &&
479 (mode & 1)))
480 command &= ~0x00000001;
484 * PASS2: Figure out the 4X/2X/1X setting and enable the
485 * target (our motherboard chipset).
488 if (command & 4) {
489 command &= ~3; /* 4X */
491 if (command & 2) {
492 command &= ~5; /* 2X */
494 if (command & 1) {
495 command &= ~6; /* 1X */
497 command |= 0x00000100;
499 pci_write_config_dword(agp_bridge.dev,
500 agp_bridge.capndx + 8,
501 command);
504 * PASS3: Go throu all AGP devices and update the
505 * command registers.
508 while ((device = pci_find_class(PCI_CLASS_DISPLAY_VGA << 8,
509 device)) != NULL) {
510 pci_read_config_dword(device, 0x04, &scratch);
512 if (!(scratch & 0x00100000))
513 continue;
515 pci_read_config_byte(device, 0x34, &cap_ptr);
517 if (cap_ptr != 0x00) {
518 do {
519 pci_read_config_dword(device,
520 cap_ptr, &cap_id);
522 if ((cap_id & 0xff) != 0x02)
523 cap_ptr = (cap_id >> 8) & 0xff;
525 while (((cap_id & 0xff) != 0x02) && (cap_ptr != 0x00));
527 if (cap_ptr != 0x00)
528 pci_write_config_dword(device, cap_ptr + 8, command);
532 static int agp_generic_create_gatt_table(void)
534 char *table;
535 char *table_end;
536 int size;
537 int page_order;
538 int num_entries;
539 int i;
540 void *temp;
542 table = NULL;
543 i = agp_bridge.aperture_size_idx;
544 temp = agp_bridge.current_size;
545 size = page_order = num_entries = 0;
547 if (agp_bridge.size_type != FIXED_APER_SIZE) {
548 do {
549 switch (agp_bridge.size_type) {
550 case U8_APER_SIZE:
551 size = A_SIZE_8(temp)->size;
552 page_order =
553 A_SIZE_8(temp)->page_order;
554 num_entries =
555 A_SIZE_8(temp)->num_entries;
556 break;
557 case U16_APER_SIZE:
558 size = A_SIZE_16(temp)->size;
559 page_order = A_SIZE_16(temp)->page_order;
560 num_entries = A_SIZE_16(temp)->num_entries;
561 break;
562 case U32_APER_SIZE:
563 size = A_SIZE_32(temp)->size;
564 page_order = A_SIZE_32(temp)->page_order;
565 num_entries = A_SIZE_32(temp)->num_entries;
566 break;
567 /* This case will never really happen. */
568 case FIXED_APER_SIZE:
569 default:
570 size = page_order = num_entries = 0;
571 break;
574 table = (char *) __get_free_pages(GFP_KERNEL,
575 page_order);
577 if (table == NULL) {
578 i++;
579 switch (agp_bridge.size_type) {
580 case U8_APER_SIZE:
581 agp_bridge.current_size = A_IDX8();
582 break;
583 case U16_APER_SIZE:
584 agp_bridge.current_size = A_IDX16();
585 break;
586 case U32_APER_SIZE:
587 agp_bridge.current_size = A_IDX32();
588 break;
589 /* This case will never really
590 * happen.
592 case FIXED_APER_SIZE:
593 default:
594 agp_bridge.current_size =
595 agp_bridge.current_size;
596 break;
598 } else {
599 agp_bridge.aperture_size_idx = i;
601 } while ((table == NULL) &&
602 (i < agp_bridge.num_aperture_sizes));
603 } else {
604 size = ((aper_size_info_fixed *) temp)->size;
605 page_order = ((aper_size_info_fixed *) temp)->page_order;
606 num_entries = ((aper_size_info_fixed *) temp)->num_entries;
607 table = (char *) __get_free_pages(GFP_KERNEL, page_order);
610 if (table == NULL) {
611 return -ENOMEM;
613 table_end = table + ((PAGE_SIZE * (1 << page_order)) - 1);
615 for (i = MAP_NR(table); i < MAP_NR(table_end); i++) {
616 set_bit(PG_reserved, &mem_map[i].flags);
619 agp_bridge.gatt_table_real = (unsigned long *) table;
620 CACHE_FLUSH();
621 agp_bridge.gatt_table = ioremap_nocache(virt_to_phys(table),
622 (PAGE_SIZE * (1 << page_order)));
623 CACHE_FLUSH();
625 if (agp_bridge.gatt_table == NULL) {
626 for (i = MAP_NR(table); i < MAP_NR(table_end); i++) {
627 clear_bit(PG_reserved, &mem_map[i].flags);
630 free_pages((unsigned long) table, page_order);
632 return -ENOMEM;
634 agp_bridge.gatt_bus_addr = virt_to_phys(agp_bridge.gatt_table_real);
636 for (i = 0; i < num_entries; i++) {
637 agp_bridge.gatt_table[i] =
638 (unsigned long) agp_bridge.scratch_page;
641 return 0;
644 static int agp_generic_free_gatt_table(void)
646 int i;
647 int page_order;
648 char *table, *table_end;
649 void *temp;
651 temp = agp_bridge.current_size;
653 switch (agp_bridge.size_type) {
654 case U8_APER_SIZE:
655 page_order = A_SIZE_8(temp)->page_order;
656 break;
657 case U16_APER_SIZE:
658 page_order = A_SIZE_16(temp)->page_order;
659 break;
660 case U32_APER_SIZE:
661 page_order = A_SIZE_32(temp)->page_order;
662 break;
663 case FIXED_APER_SIZE:
664 page_order = A_SIZE_FIX(temp)->page_order;
665 break;
666 default:
667 page_order = 0;
668 break;
671 /* Do not worry about freeing memory, because if this is
672 * called, then all agp memory is deallocated and removed
673 * from the table.
676 iounmap(agp_bridge.gatt_table);
677 table = (char *) agp_bridge.gatt_table_real;
678 table_end = table + ((PAGE_SIZE * (1 << page_order)) - 1);
680 for (i = MAP_NR(table); i < MAP_NR(table_end); i++) {
681 clear_bit(PG_reserved, &mem_map[i].flags);
684 free_pages((unsigned long) agp_bridge.gatt_table_real, page_order);
685 return 0;
688 static int agp_generic_insert_memory(agp_memory * mem,
689 off_t pg_start, int type)
691 int i, j, num_entries;
692 void *temp;
694 temp = agp_bridge.current_size;
696 switch (agp_bridge.size_type) {
697 case U8_APER_SIZE:
698 num_entries = A_SIZE_8(temp)->num_entries;
699 break;
700 case U16_APER_SIZE:
701 num_entries = A_SIZE_16(temp)->num_entries;
702 break;
703 case U32_APER_SIZE:
704 num_entries = A_SIZE_32(temp)->num_entries;
705 break;
706 case FIXED_APER_SIZE:
707 num_entries = A_SIZE_FIX(temp)->num_entries;
708 break;
709 default:
710 num_entries = 0;
711 break;
714 if (type != 0 || mem->type != 0) {
715 /* The generic routines know nothing of memory types */
716 return -EINVAL;
718 if ((pg_start + mem->page_count) > num_entries) {
719 return -EINVAL;
721 j = pg_start;
723 while (j < (pg_start + mem->page_count)) {
724 if (!PGE_EMPTY(agp_bridge.gatt_table[j])) {
725 return -EBUSY;
727 j++;
730 if (mem->is_flushed == FALSE) {
731 CACHE_FLUSH();
732 mem->is_flushed = TRUE;
734 for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
735 agp_bridge.gatt_table[j] = mem->memory[i];
738 agp_bridge.tlb_flush(mem);
739 return 0;
742 static int agp_generic_remove_memory(agp_memory * mem, off_t pg_start,
743 int type)
745 int i;
747 if (type != 0 || mem->type != 0) {
748 /* The generic routines know nothing of memory types */
749 return -EINVAL;
751 for (i = pg_start; i < (mem->page_count + pg_start); i++) {
752 agp_bridge.gatt_table[i] =
753 (unsigned long) agp_bridge.scratch_page;
756 agp_bridge.tlb_flush(mem);
757 return 0;
760 static agp_memory *agp_generic_alloc_by_type(size_t page_count, int type)
762 return NULL;
765 static void agp_generic_free_by_type(agp_memory * curr)
767 if (curr->memory != NULL) {
768 vfree(curr->memory);
770 agp_free_key(curr->key);
771 kfree(curr);
774 void agp_enable(u32 mode)
776 agp_bridge.agp_enable(mode);
779 /* End - Generic Agp routines */
781 #ifdef CONFIG_AGP_I810
782 static aper_size_info_fixed intel_i810_sizes[] =
784 {64, 16384, 4},
785 /* The 32M mode still requires a 64k gatt */
786 {32, 8192, 4}
789 #define AGP_DCACHE_MEMORY 1
790 #define AGP_PHYS_MEMORY 2
792 static gatt_mask intel_i810_masks[] =
794 {I810_PTE_VALID, 0},
795 {(I810_PTE_VALID | I810_PTE_LOCAL), AGP_DCACHE_MEMORY},
796 {I810_PTE_VALID, 0}
799 static struct _intel_i810_private {
800 struct pci_dev *i810_dev; /* device one */
801 volatile u8 *registers;
802 int num_dcache_entries;
803 } intel_i810_private;
805 static int intel_i810_fetch_size(void)
807 u32 smram_miscc;
808 aper_size_info_fixed *values;
810 pci_read_config_dword(agp_bridge.dev, I810_SMRAM_MISCC, &smram_miscc);
811 values = A_SIZE_FIX(agp_bridge.aperture_sizes);
813 if ((smram_miscc & I810_GMS) == I810_GMS_DISABLE) {
814 printk(KERN_WARNING PFX "i810 is disabled\n");
815 return 0;
817 if ((smram_miscc & I810_GFX_MEM_WIN_SIZE) == I810_GFX_MEM_WIN_32M) {
818 agp_bridge.previous_size =
819 agp_bridge.current_size = (void *) (values + 1);
820 agp_bridge.aperture_size_idx = 1;
821 return values[1].size;
822 } else {
823 agp_bridge.previous_size =
824 agp_bridge.current_size = (void *) (values);
825 agp_bridge.aperture_size_idx = 0;
826 return values[0].size;
829 return 0;
832 static int intel_i810_configure(void)
834 aper_size_info_fixed *current_size;
835 u32 temp;
836 int i;
838 current_size = A_SIZE_FIX(agp_bridge.current_size);
840 pci_read_config_dword(intel_i810_private.i810_dev, I810_MMADDR, &temp);
841 temp &= 0xfff80000;
843 intel_i810_private.registers =
844 (volatile u8 *) ioremap(temp, 128 * 4096);
846 if ((INREG32(intel_i810_private.registers, I810_DRAM_CTL)
847 & I810_DRAM_ROW_0) == I810_DRAM_ROW_0_SDRAM) {
848 /* This will need to be dynamically assigned */
849 printk(KERN_INFO PFX "detected 4MB dedicated video ram.\n");
850 intel_i810_private.num_dcache_entries = 1024;
852 pci_read_config_dword(intel_i810_private.i810_dev, I810_GMADDR, &temp);
853 agp_bridge.gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
854 OUTREG32(intel_i810_private.registers, I810_PGETBL_CTL,
855 agp_bridge.gatt_bus_addr | I810_PGETBL_ENABLED);
856 CACHE_FLUSH();
858 if (agp_bridge.needs_scratch_page == TRUE) {
859 for (i = 0; i < current_size->num_entries; i++) {
860 OUTREG32(intel_i810_private.registers,
861 I810_PTE_BASE + (i * 4),
862 agp_bridge.scratch_page);
865 return 0;
868 static void intel_i810_cleanup(void)
870 OUTREG32(intel_i810_private.registers, I810_PGETBL_CTL, 0);
871 iounmap((void *) intel_i810_private.registers);
874 static void intel_i810_tlbflush(agp_memory * mem)
876 return;
879 static void intel_i810_agp_enable(u32 mode)
881 return;
884 static int intel_i810_insert_entries(agp_memory * mem, off_t pg_start,
885 int type)
887 int i, j, num_entries;
888 void *temp;
890 temp = agp_bridge.current_size;
891 num_entries = A_SIZE_FIX(temp)->num_entries;
893 if ((pg_start + mem->page_count) > num_entries) {
894 return -EINVAL;
896 for (j = pg_start; j < (pg_start + mem->page_count); j++) {
897 if (!PGE_EMPTY(agp_bridge.gatt_table[j])) {
898 return -EBUSY;
902 if (type != 0 || mem->type != 0) {
903 if ((type == AGP_DCACHE_MEMORY) &&
904 (mem->type == AGP_DCACHE_MEMORY)) {
905 /* special insert */
906 CACHE_FLUSH();
907 for (i = pg_start;
908 i < (pg_start + mem->page_count); i++) {
909 OUTREG32(intel_i810_private.registers,
910 I810_PTE_BASE + (i * 4),
911 (i * 4096) | I810_PTE_LOCAL |
912 I810_PTE_VALID);
914 CACHE_FLUSH();
915 agp_bridge.tlb_flush(mem);
916 return 0;
918 if((type == AGP_PHYS_MEMORY) &&
919 (mem->type == AGP_PHYS_MEMORY)) {
920 goto insert;
922 return -EINVAL;
925 insert:
926 CACHE_FLUSH();
927 for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
928 OUTREG32(intel_i810_private.registers,
929 I810_PTE_BASE + (j * 4), mem->memory[i]);
931 CACHE_FLUSH();
933 agp_bridge.tlb_flush(mem);
934 return 0;
937 static int intel_i810_remove_entries(agp_memory * mem, off_t pg_start,
938 int type)
940 int i;
942 for (i = pg_start; i < (mem->page_count + pg_start); i++) {
943 OUTREG32(intel_i810_private.registers,
944 I810_PTE_BASE + (i * 4),
945 agp_bridge.scratch_page);
948 agp_bridge.tlb_flush(mem);
949 return 0;
952 static agp_memory *intel_i810_alloc_by_type(size_t pg_count, int type)
954 agp_memory *new;
956 if (type == AGP_DCACHE_MEMORY) {
957 if (pg_count != intel_i810_private.num_dcache_entries) {
958 return NULL;
960 new = agp_create_memory(1);
962 if (new == NULL) {
963 return NULL;
965 new->type = AGP_DCACHE_MEMORY;
966 new->page_count = pg_count;
967 new->num_scratch_pages = 0;
968 vfree(new->memory);
969 MOD_INC_USE_COUNT;
970 return new;
972 if(type == AGP_PHYS_MEMORY) {
973 /* The I810 requires a physical address to program
974 * it's mouse pointer into hardware. However the
975 * Xserver still writes to it through the agp
976 * aperture
978 if (pg_count != 1) {
979 return NULL;
981 new = agp_create_memory(1);
983 if (new == NULL) {
984 return NULL;
986 MOD_INC_USE_COUNT;
987 new->memory[0] = agp_alloc_page();
989 if (new->memory[0] == 0) {
990 /* Free this structure */
991 agp_free_memory(new);
992 return NULL;
994 new->memory[0] =
995 agp_bridge.mask_memory(
996 virt_to_phys((void *) new->memory[0]),
997 type);
998 new->page_count = 1;
999 new->num_scratch_pages = 1;
1000 new->type = AGP_PHYS_MEMORY;
1001 new->physical = virt_to_phys((void *) new->memory[0]);
1002 return new;
1005 return NULL;
1008 static void intel_i810_free_by_type(agp_memory * curr)
1010 agp_free_key(curr->key);
1011 if(curr->type == AGP_PHYS_MEMORY) {
1012 agp_destroy_page((unsigned long)
1013 phys_to_virt(curr->memory[0]));
1014 vfree(curr->memory);
1016 kfree(curr);
1017 MOD_DEC_USE_COUNT;
1020 static unsigned long intel_i810_mask_memory(unsigned long addr, int type)
1022 /* Type checking must be done elsewhere */
1023 return addr | agp_bridge.masks[type].mask;
1026 static int __init intel_i810_setup(struct pci_dev *i810_dev)
1028 intel_i810_private.i810_dev = i810_dev;
1030 agp_bridge.masks = intel_i810_masks;
1031 agp_bridge.num_of_masks = 2;
1032 agp_bridge.aperture_sizes = (void *) intel_i810_sizes;
1033 agp_bridge.size_type = FIXED_APER_SIZE;
1034 agp_bridge.num_aperture_sizes = 2;
1035 agp_bridge.dev_private_data = (void *) &intel_i810_private;
1036 agp_bridge.needs_scratch_page = TRUE;
1037 agp_bridge.configure = intel_i810_configure;
1038 agp_bridge.fetch_size = intel_i810_fetch_size;
1039 agp_bridge.cleanup = intel_i810_cleanup;
1040 agp_bridge.tlb_flush = intel_i810_tlbflush;
1041 agp_bridge.mask_memory = intel_i810_mask_memory;
1042 agp_bridge.agp_enable = intel_i810_agp_enable;
1043 agp_bridge.cache_flush = global_cache_flush;
1044 agp_bridge.create_gatt_table = agp_generic_create_gatt_table;
1045 agp_bridge.free_gatt_table = agp_generic_free_gatt_table;
1046 agp_bridge.insert_memory = intel_i810_insert_entries;
1047 agp_bridge.remove_memory = intel_i810_remove_entries;
1048 agp_bridge.alloc_by_type = intel_i810_alloc_by_type;
1049 agp_bridge.free_by_type = intel_i810_free_by_type;
1051 return 0;
1054 #endif /* CONFIG_AGP_I810 */
1056 #ifdef CONFIG_AGP_INTEL
1058 static int intel_fetch_size(void)
1060 int i;
1061 u16 temp;
1062 aper_size_info_16 *values;
1064 pci_read_config_word(agp_bridge.dev, INTEL_APSIZE, &temp);
1065 values = A_SIZE_16(agp_bridge.aperture_sizes);
1067 for (i = 0; i < agp_bridge.num_aperture_sizes; i++) {
1068 if (temp == values[i].size_value) {
1069 agp_bridge.previous_size =
1070 agp_bridge.current_size = (void *) (values + i);
1071 agp_bridge.aperture_size_idx = i;
1072 return values[i].size;
1076 return 0;
1079 static void intel_tlbflush(agp_memory * mem)
1081 pci_write_config_dword(agp_bridge.dev, INTEL_AGPCTRL, 0x2200);
1082 pci_write_config_dword(agp_bridge.dev, INTEL_AGPCTRL, 0x2280);
1085 static void intel_cleanup(void)
1087 u16 temp;
1088 aper_size_info_16 *previous_size;
1090 previous_size = A_SIZE_16(agp_bridge.previous_size);
1091 pci_read_config_word(agp_bridge.dev, INTEL_NBXCFG, &temp);
1092 pci_write_config_word(agp_bridge.dev, INTEL_NBXCFG, temp & ~(1 << 9));
1093 pci_write_config_word(agp_bridge.dev, INTEL_APSIZE,
1094 previous_size->size_value);
1097 static int intel_configure(void)
1099 u32 temp;
1100 u16 temp2;
1101 aper_size_info_16 *current_size;
1103 current_size = A_SIZE_16(agp_bridge.current_size);
1105 /* aperture size */
1106 pci_write_config_word(agp_bridge.dev, INTEL_APSIZE,
1107 current_size->size_value);
1109 /* address to map to */
1110 pci_read_config_dword(agp_bridge.dev, INTEL_APBASE, &temp);
1111 agp_bridge.gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
1113 /* attbase - aperture base */
1114 pci_write_config_dword(agp_bridge.dev, INTEL_ATTBASE,
1115 agp_bridge.gatt_bus_addr);
1117 /* agpctrl */
1118 pci_write_config_dword(agp_bridge.dev, INTEL_AGPCTRL, 0x2280);
1120 /* paccfg/nbxcfg */
1121 pci_read_config_word(agp_bridge.dev, INTEL_NBXCFG, &temp2);
1122 pci_write_config_word(agp_bridge.dev, INTEL_NBXCFG,
1123 (temp2 & ~(1 << 10)) | (1 << 9));
1124 /* clear any possible error conditions */
1125 pci_write_config_byte(agp_bridge.dev, INTEL_ERRSTS + 1, 7);
1126 return 0;
1129 static unsigned long intel_mask_memory(unsigned long addr, int type)
1131 /* Memory type is ignored */
1133 return addr | agp_bridge.masks[0].mask;
1137 /* Setup function */
1138 static gatt_mask intel_generic_masks[] =
1140 {0x00000017, 0}
1143 static aper_size_info_16 intel_generic_sizes[7] =
1145 {256, 65536, 6, 0},
1146 {128, 32768, 5, 32},
1147 {64, 16384, 4, 48},
1148 {32, 8192, 3, 56},
1149 {16, 4096, 2, 60},
1150 {8, 2048, 1, 62},
1151 {4, 1024, 0, 63}
1154 static int __init intel_generic_setup (struct pci_dev *pdev)
1156 agp_bridge.masks = intel_generic_masks;
1157 agp_bridge.num_of_masks = 1;
1158 agp_bridge.aperture_sizes = (void *) intel_generic_sizes;
1159 agp_bridge.size_type = U16_APER_SIZE;
1160 agp_bridge.num_aperture_sizes = 7;
1161 agp_bridge.dev_private_data = NULL;
1162 agp_bridge.needs_scratch_page = FALSE;
1163 agp_bridge.configure = intel_configure;
1164 agp_bridge.fetch_size = intel_fetch_size;
1165 agp_bridge.cleanup = intel_cleanup;
1166 agp_bridge.tlb_flush = intel_tlbflush;
1167 agp_bridge.mask_memory = intel_mask_memory;
1168 agp_bridge.agp_enable = agp_generic_agp_enable;
1169 agp_bridge.cache_flush = global_cache_flush;
1170 agp_bridge.create_gatt_table = agp_generic_create_gatt_table;
1171 agp_bridge.free_gatt_table = agp_generic_free_gatt_table;
1172 agp_bridge.insert_memory = agp_generic_insert_memory;
1173 agp_bridge.remove_memory = agp_generic_remove_memory;
1174 agp_bridge.alloc_by_type = agp_generic_alloc_by_type;
1175 agp_bridge.free_by_type = agp_generic_free_by_type;
1177 return 0;
1179 (void) pdev; /* unused */
1182 #endif /* CONFIG_AGP_INTEL */
1184 #ifdef CONFIG_AGP_VIA
1186 static int via_fetch_size(void)
1188 int i;
1189 u8 temp;
1190 aper_size_info_8 *values;
1192 values = A_SIZE_8(agp_bridge.aperture_sizes);
1193 pci_read_config_byte(agp_bridge.dev, VIA_APSIZE, &temp);
1194 for (i = 0; i < agp_bridge.num_aperture_sizes; i++) {
1195 if (temp == values[i].size_value) {
1196 agp_bridge.previous_size =
1197 agp_bridge.current_size = (void *) (values + i);
1198 agp_bridge.aperture_size_idx = i;
1199 return values[i].size;
1203 return 0;
1206 static int via_configure(void)
1208 u32 temp;
1209 aper_size_info_8 *current_size;
1211 current_size = A_SIZE_8(agp_bridge.current_size);
1212 /* aperture size */
1213 pci_write_config_byte(agp_bridge.dev, VIA_APSIZE,
1214 current_size->size_value);
1215 /* address to map too */
1216 pci_read_config_dword(agp_bridge.dev, VIA_APBASE, &temp);
1217 agp_bridge.gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
1219 /* GART control register */
1220 pci_write_config_dword(agp_bridge.dev, VIA_GARTCTRL, 0x0000000f);
1222 /* attbase - aperture GATT base */
1223 pci_write_config_dword(agp_bridge.dev, VIA_ATTBASE,
1224 (agp_bridge.gatt_bus_addr & 0xfffff000) | 3);
1225 return 0;
1228 static void via_cleanup(void)
1230 aper_size_info_8 *previous_size;
1232 previous_size = A_SIZE_8(agp_bridge.previous_size);
1233 pci_write_config_dword(agp_bridge.dev, VIA_ATTBASE, 0);
1234 pci_write_config_byte(agp_bridge.dev, VIA_APSIZE,
1235 previous_size->size_value);
1238 static void via_tlbflush(agp_memory * mem)
1240 pci_write_config_dword(agp_bridge.dev, VIA_GARTCTRL, 0x0000008f);
1241 pci_write_config_dword(agp_bridge.dev, VIA_GARTCTRL, 0x0000000f);
1244 static unsigned long via_mask_memory(unsigned long addr, int type)
1246 /* Memory type is ignored */
1248 return addr | agp_bridge.masks[0].mask;
1251 static aper_size_info_8 via_generic_sizes[7] =
1253 {256, 65536, 6, 0},
1254 {128, 32768, 5, 128},
1255 {64, 16384, 4, 192},
1256 {32, 8192, 3, 224},
1257 {16, 4096, 2, 240},
1258 {8, 2048, 1, 248},
1259 {4, 1024, 0, 252}
1262 static gatt_mask via_generic_masks[] =
1264 {0x00000000, 0}
1267 static int __init via_generic_setup (struct pci_dev *pdev)
1269 agp_bridge.masks = via_generic_masks;
1270 agp_bridge.num_of_masks = 1;
1271 agp_bridge.aperture_sizes = (void *) via_generic_sizes;
1272 agp_bridge.size_type = U8_APER_SIZE;
1273 agp_bridge.num_aperture_sizes = 7;
1274 agp_bridge.dev_private_data = NULL;
1275 agp_bridge.needs_scratch_page = FALSE;
1276 agp_bridge.configure = via_configure;
1277 agp_bridge.fetch_size = via_fetch_size;
1278 agp_bridge.cleanup = via_cleanup;
1279 agp_bridge.tlb_flush = via_tlbflush;
1280 agp_bridge.mask_memory = via_mask_memory;
1281 agp_bridge.agp_enable = agp_generic_agp_enable;
1282 agp_bridge.cache_flush = global_cache_flush;
1283 agp_bridge.create_gatt_table = agp_generic_create_gatt_table;
1284 agp_bridge.free_gatt_table = agp_generic_free_gatt_table;
1285 agp_bridge.insert_memory = agp_generic_insert_memory;
1286 agp_bridge.remove_memory = agp_generic_remove_memory;
1287 agp_bridge.alloc_by_type = agp_generic_alloc_by_type;
1288 agp_bridge.free_by_type = agp_generic_free_by_type;
1290 return 0;
1292 (void) pdev; /* unused */
1295 #endif /* CONFIG_AGP_VIA */
1297 #ifdef CONFIG_AGP_SIS
1299 static int sis_fetch_size(void)
1301 u8 temp_size;
1302 int i;
1303 aper_size_info_8 *values;
1305 pci_read_config_byte(agp_bridge.dev, SIS_APSIZE, &temp_size);
1306 values = A_SIZE_8(agp_bridge.aperture_sizes);
1307 for (i = 0; i < agp_bridge.num_aperture_sizes; i++) {
1308 if ((temp_size == values[i].size_value) ||
1309 ((temp_size & ~(0x03)) ==
1310 (values[i].size_value & ~(0x03)))) {
1311 agp_bridge.previous_size =
1312 agp_bridge.current_size = (void *) (values + i);
1314 agp_bridge.aperture_size_idx = i;
1315 return values[i].size;
1319 return 0;
1323 static void sis_tlbflush(agp_memory * mem)
1325 pci_write_config_byte(agp_bridge.dev, SIS_TLBFLUSH, 0x02);
1328 static int sis_configure(void)
1330 u32 temp;
1331 aper_size_info_8 *current_size;
1333 current_size = A_SIZE_8(agp_bridge.current_size);
1334 pci_write_config_byte(agp_bridge.dev, SIS_TLBCNTRL, 0x05);
1335 pci_read_config_dword(agp_bridge.dev, SIS_APBASE, &temp);
1336 agp_bridge.gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
1337 pci_write_config_dword(agp_bridge.dev, SIS_ATTBASE,
1338 agp_bridge.gatt_bus_addr);
1339 pci_write_config_byte(agp_bridge.dev, SIS_APSIZE,
1340 current_size->size_value);
1341 return 0;
1344 static void sis_cleanup(void)
1346 aper_size_info_8 *previous_size;
1348 previous_size = A_SIZE_8(agp_bridge.previous_size);
1349 pci_write_config_byte(agp_bridge.dev, SIS_APSIZE,
1350 (previous_size->size_value & ~(0x03)));
1353 static unsigned long sis_mask_memory(unsigned long addr, int type)
1355 /* Memory type is ignored */
1357 return addr | agp_bridge.masks[0].mask;
1360 static aper_size_info_8 sis_generic_sizes[7] =
1362 {256, 65536, 6, 99},
1363 {128, 32768, 5, 83},
1364 {64, 16384, 4, 67},
1365 {32, 8192, 3, 51},
1366 {16, 4096, 2, 35},
1367 {8, 2048, 1, 19},
1368 {4, 1024, 0, 3}
1371 static gatt_mask sis_generic_masks[] =
1373 {0x00000000, 0}
1376 static int __init sis_generic_setup (struct pci_dev *pdev)
1378 agp_bridge.masks = sis_generic_masks;
1379 agp_bridge.num_of_masks = 1;
1380 agp_bridge.aperture_sizes = (void *) sis_generic_sizes;
1381 agp_bridge.size_type = U8_APER_SIZE;
1382 agp_bridge.num_aperture_sizes = 7;
1383 agp_bridge.dev_private_data = NULL;
1384 agp_bridge.needs_scratch_page = FALSE;
1385 agp_bridge.configure = sis_configure;
1386 agp_bridge.fetch_size = sis_fetch_size;
1387 agp_bridge.cleanup = sis_cleanup;
1388 agp_bridge.tlb_flush = sis_tlbflush;
1389 agp_bridge.mask_memory = sis_mask_memory;
1390 agp_bridge.agp_enable = agp_generic_agp_enable;
1391 agp_bridge.cache_flush = global_cache_flush;
1392 agp_bridge.create_gatt_table = agp_generic_create_gatt_table;
1393 agp_bridge.free_gatt_table = agp_generic_free_gatt_table;
1394 agp_bridge.insert_memory = agp_generic_insert_memory;
1395 agp_bridge.remove_memory = agp_generic_remove_memory;
1396 agp_bridge.alloc_by_type = agp_generic_alloc_by_type;
1397 agp_bridge.free_by_type = agp_generic_free_by_type;
1399 return 0;
1402 #endif /* CONFIG_AGP_SIS */
1404 #ifdef CONFIG_AGP_AMD
1406 static struct _amd_irongate_private {
1407 volatile u8 *registers;
1408 } amd_irongate_private;
1410 static int amd_irongate_fetch_size(void)
1412 int i;
1413 u32 temp;
1414 aper_size_info_32 *values;
1416 pci_read_config_dword(agp_bridge.dev, AMD_APSIZE, &temp);
1417 temp = (temp & 0x0000000e);
1418 values = A_SIZE_32(agp_bridge.aperture_sizes);
1419 for (i = 0; i < agp_bridge.num_aperture_sizes; i++) {
1420 if (temp == values[i].size_value) {
1421 agp_bridge.previous_size =
1422 agp_bridge.current_size = (void *) (values + i);
1424 agp_bridge.aperture_size_idx = i;
1425 return values[i].size;
1429 return 0;
1432 static int amd_irongate_configure(void)
1434 aper_size_info_32 *current_size;
1435 unsigned long addr;
1436 u32 temp;
1437 u16 enable_reg;
1439 current_size = A_SIZE_32(agp_bridge.current_size);
1441 /* Get the memory mapped registers */
1442 pci_read_config_dword(agp_bridge.dev, AMD_MMBASE, &temp);
1443 temp = (temp & PCI_BASE_ADDRESS_MEM_MASK);
1444 amd_irongate_private.registers = (volatile u8 *) ioremap(temp, 4096);
1446 /* Write out the address of the gatt table */
1447 OUTREG32(amd_irongate_private.registers, AMD_ATTBASE,
1448 agp_bridge.gatt_bus_addr);
1450 /* Write the Sync register */
1451 pci_write_config_byte(agp_bridge.dev, AMD_MODECNTL, 0x80);
1453 /* Write the enable register */
1454 enable_reg = INREG16(amd_irongate_private.registers, AMD_GARTENABLE);
1455 enable_reg = (enable_reg | 0x0004);
1456 OUTREG16(amd_irongate_private.registers, AMD_GARTENABLE, enable_reg);
1458 /* Write out the size register */
1459 pci_read_config_dword(agp_bridge.dev, AMD_APSIZE, &temp);
1460 temp = (((temp & ~(0x0000000e)) | current_size->size_value)
1461 | 0x00000001);
1462 pci_write_config_dword(agp_bridge.dev, AMD_APSIZE, temp);
1464 /* Flush the tlb */
1465 OUTREG32(amd_irongate_private.registers, AMD_TLBFLUSH, 0x00000001);
1467 /* Get the address for the gart region */
1468 pci_read_config_dword(agp_bridge.dev, AMD_APBASE, &temp);
1469 addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
1470 #ifdef __alpha__
1471 /* ??? Presumably what is wanted is the bus address as seen
1472 from the CPU side, since it appears that this value is
1473 exported to userland via an ioctl. The terminology below
1474 is confused, mixing `physical address' with `bus address',
1475 as x86 folk are wont to do. */
1476 addr = virt_to_phys(ioremap(addr, 0));
1477 #endif
1478 agp_bridge.gart_bus_addr = addr;
1479 return 0;
1482 static void amd_irongate_cleanup(void)
1484 aper_size_info_32 *previous_size;
1485 u32 temp;
1486 u16 enable_reg;
1488 previous_size = A_SIZE_32(agp_bridge.previous_size);
1490 enable_reg = INREG16(amd_irongate_private.registers, AMD_GARTENABLE);
1491 enable_reg = (enable_reg & ~(0x0004));
1492 OUTREG16(amd_irongate_private.registers, AMD_GARTENABLE, enable_reg);
1494 /* Write back the previous size and disable gart translation */
1495 pci_read_config_dword(agp_bridge.dev, AMD_APSIZE, &temp);
1496 temp = ((temp & ~(0x0000000f)) | previous_size->size_value);
1497 pci_write_config_dword(agp_bridge.dev, AMD_APSIZE, temp);
1498 iounmap((void *) amd_irongate_private.registers);
1502 * This routine could be implemented by taking the addresses
1503 * written to the GATT, and flushing them individually. However
1504 * currently it just flushes the whole table. Which is probably
1505 * more efficent, since agp_memory blocks can be a large number of
1506 * entries.
1509 static void amd_irongate_tlbflush(agp_memory * temp)
1511 OUTREG32(amd_irongate_private.registers, AMD_TLBFLUSH, 0x00000001);
1514 static unsigned long amd_irongate_mask_memory(unsigned long addr, int type)
1516 /* Only type 0 is supported by the irongate */
1518 return addr | agp_bridge.masks[0].mask;
1521 static aper_size_info_32 amd_irongate_sizes[7] =
1523 {2048, 524288, 9, 0x0000000c},
1524 {1024, 262144, 8, 0x0000000a},
1525 {512, 131072, 7, 0x00000008},
1526 {256, 65536, 6, 0x00000006},
1527 {128, 32768, 5, 0x00000004},
1528 {64, 16384, 4, 0x00000002},
1529 {32, 8192, 3, 0x00000000}
1532 static gatt_mask amd_irongate_masks[] =
1534 {0x00000001, 0}
1537 static int __init amd_irongate_setup (struct pci_dev *pdev)
1539 agp_bridge.masks = amd_irongate_masks;
1540 agp_bridge.num_of_masks = 1;
1541 agp_bridge.aperture_sizes = (void *) amd_irongate_sizes;
1542 agp_bridge.size_type = U32_APER_SIZE;
1543 agp_bridge.num_aperture_sizes = 7;
1544 agp_bridge.dev_private_data = (void *) &amd_irongate_private;
1545 agp_bridge.needs_scratch_page = FALSE;
1546 agp_bridge.configure = amd_irongate_configure;
1547 agp_bridge.fetch_size = amd_irongate_fetch_size;
1548 agp_bridge.cleanup = amd_irongate_cleanup;
1549 agp_bridge.tlb_flush = amd_irongate_tlbflush;
1550 agp_bridge.mask_memory = amd_irongate_mask_memory;
1551 agp_bridge.agp_enable = agp_generic_agp_enable;
1552 agp_bridge.cache_flush = global_cache_flush;
1553 agp_bridge.create_gatt_table = agp_generic_create_gatt_table;
1554 agp_bridge.free_gatt_table = agp_generic_free_gatt_table;
1555 agp_bridge.insert_memory = agp_generic_insert_memory;
1556 agp_bridge.remove_memory = agp_generic_remove_memory;
1557 agp_bridge.alloc_by_type = agp_generic_alloc_by_type;
1558 agp_bridge.free_by_type = agp_generic_free_by_type;
1560 return 0;
1562 (void) pdev; /* unused */
1565 #endif /* CONFIG_AGP_AMD */
1567 #ifdef CONFIG_AGP_ALI
1569 static int ali_fetch_size(void)
1571 int i;
1572 u32 temp;
1573 aper_size_info_32 *values;
1575 pci_read_config_dword(agp_bridge.dev, ALI_ATTBASE, &temp);
1576 temp &= ~(0xfffffff0);
1577 values = A_SIZE_32(agp_bridge.aperture_sizes);
1579 for (i = 0; i < agp_bridge.num_aperture_sizes; i++) {
1580 if (temp == values[i].size_value) {
1581 agp_bridge.previous_size =
1582 agp_bridge.current_size = (void *) (values + i);
1583 agp_bridge.aperture_size_idx = i;
1584 return values[i].size;
1588 return 0;
1591 static void ali_tlbflush(agp_memory * mem)
1593 u32 temp;
1595 pci_read_config_dword(agp_bridge.dev, ALI_TLBCTRL, &temp);
1596 pci_write_config_dword(agp_bridge.dev, ALI_TLBCTRL,
1597 ((temp & 0xffffff00) | 0x00000090));
1598 pci_write_config_dword(agp_bridge.dev, ALI_TLBCTRL,
1599 ((temp & 0xffffff00) | 0x00000010));
1602 static void ali_cleanup(void)
1604 aper_size_info_32 *previous_size;
1605 u32 temp;
1607 previous_size = A_SIZE_32(agp_bridge.previous_size);
1609 pci_read_config_dword(agp_bridge.dev, ALI_TLBCTRL, &temp);
1610 pci_write_config_dword(agp_bridge.dev, ALI_TLBCTRL,
1611 ((temp & 0xffffff00) | 0x00000090));
1612 pci_write_config_dword(agp_bridge.dev, ALI_ATTBASE,
1613 previous_size->size_value);
1616 static int ali_configure(void)
1618 u32 temp;
1619 aper_size_info_32 *current_size;
1621 current_size = A_SIZE_32(agp_bridge.current_size);
1623 /* aperture size and gatt addr */
1624 pci_write_config_dword(agp_bridge.dev, ALI_ATTBASE,
1625 agp_bridge.gatt_bus_addr | current_size->size_value);
1627 /* tlb control */
1628 pci_read_config_dword(agp_bridge.dev, ALI_TLBCTRL, &temp);
1629 pci_write_config_dword(agp_bridge.dev, ALI_TLBCTRL,
1630 ((temp & 0xffffff00) | 0x00000010));
1632 /* address to map to */
1633 pci_read_config_dword(agp_bridge.dev, ALI_APBASE, &temp);
1634 agp_bridge.gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
1635 return 0;
1638 static unsigned long ali_mask_memory(unsigned long addr, int type)
1640 /* Memory type is ignored */
1642 return addr | agp_bridge.masks[0].mask;
1646 /* Setup function */
1647 static gatt_mask ali_generic_masks[] =
1649 {0x00000000, 0}
1652 static aper_size_info_32 ali_generic_sizes[7] =
1654 {256, 65536, 6, 10},
1655 {128, 32768, 5, 9},
1656 {64, 16384, 4, 8},
1657 {32, 8192, 3, 7},
1658 {16, 4096, 2, 6},
1659 {8, 2048, 1, 4},
1660 {4, 1024, 0, 3}
1663 static int __init ali_generic_setup (struct pci_dev *pdev)
1665 agp_bridge.masks = ali_generic_masks;
1666 agp_bridge.num_of_masks = 1;
1667 agp_bridge.aperture_sizes = (void *) ali_generic_sizes;
1668 agp_bridge.size_type = U32_APER_SIZE;
1669 agp_bridge.num_aperture_sizes = 7;
1670 agp_bridge.dev_private_data = NULL;
1671 agp_bridge.needs_scratch_page = FALSE;
1672 agp_bridge.configure = ali_configure;
1673 agp_bridge.fetch_size = ali_fetch_size;
1674 agp_bridge.cleanup = ali_cleanup;
1675 agp_bridge.tlb_flush = ali_tlbflush;
1676 agp_bridge.mask_memory = ali_mask_memory;
1677 agp_bridge.agp_enable = agp_generic_agp_enable;
1678 agp_bridge.cache_flush = global_cache_flush;
1679 agp_bridge.create_gatt_table = agp_generic_create_gatt_table;
1680 agp_bridge.free_gatt_table = agp_generic_free_gatt_table;
1681 agp_bridge.insert_memory = agp_generic_insert_memory;
1682 agp_bridge.remove_memory = agp_generic_remove_memory;
1683 agp_bridge.alloc_by_type = agp_generic_alloc_by_type;
1684 agp_bridge.free_by_type = agp_generic_free_by_type;
1686 return 0;
1688 (void) pdev; /* unused */
1691 #endif /* CONFIG_AGP_ALI */
1694 /* per-chipset initialization data.
1695 * note -- all chipsets for a single vendor MUST be grouped together
1697 static struct {
1698 unsigned short device_id; /* first, to make table easier to read */
1699 unsigned short vendor_id;
1700 enum chipset_type chipset;
1701 const char *vendor_name;
1702 const char *chipset_name;
1703 int (*chipset_setup) (struct pci_dev *pdev);
1704 } agp_bridge_info[] __initdata = {
1706 #ifdef CONFIG_AGP_ALI
1707 { PCI_DEVICE_ID_AL_M1541_0,
1708 PCI_VENDOR_ID_AL,
1709 ALI_M1541,
1710 "Ali",
1711 "M1541",
1712 ali_generic_setup },
1713 { 0,
1714 PCI_VENDOR_ID_AL,
1715 ALI_GENERIC,
1716 "Ali",
1717 "Generic",
1718 ali_generic_setup },
1719 #endif /* CONFIG_AGP_ALI */
1721 #ifdef CONFIG_AGP_AMD
1722 { PCI_DEVICE_ID_AMD_IRONGATE_0,
1723 PCI_VENDOR_ID_AMD,
1724 AMD_IRONGATE,
1725 "AMD",
1726 "Irongate",
1727 amd_irongate_setup },
1728 { 0,
1729 PCI_VENDOR_ID_AMD,
1730 AMD_GENERIC,
1731 "AMD",
1732 "Generic",
1733 amd_irongate_setup },
1734 #endif /* CONFIG_AGP_AMD */
1736 #ifdef CONFIG_AGP_INTEL
1737 { PCI_DEVICE_ID_INTEL_82443LX_0,
1738 PCI_VENDOR_ID_INTEL,
1739 INTEL_LX,
1740 "Intel",
1741 "440LX",
1742 intel_generic_setup },
1743 { PCI_DEVICE_ID_INTEL_82443BX_0,
1744 PCI_VENDOR_ID_INTEL,
1745 INTEL_BX,
1746 "Intel",
1747 "440BX",
1748 intel_generic_setup },
1749 { PCI_DEVICE_ID_INTEL_82443GX_0,
1750 PCI_VENDOR_ID_INTEL,
1751 INTEL_GX,
1752 "Intel",
1753 "440GX",
1754 intel_generic_setup },
1755 { 0,
1756 PCI_VENDOR_ID_INTEL,
1757 INTEL_GENERIC,
1758 "Intel",
1759 "Generic",
1760 intel_generic_setup },
1761 #endif /* CONFIG_AGP_INTEL */
1763 #ifdef CONFIG_AGP_SIS
1764 { 0,
1765 PCI_VENDOR_ID_SI,
1766 SIS_GENERIC,
1767 "SiS",
1768 "Generic",
1769 sis_generic_setup },
1770 #endif /* CONFIG_AGP_SIS */
1772 #ifdef CONFIG_AGP_VIA
1773 { PCI_DEVICE_ID_VIA_8501_0,
1774 PCI_VENDOR_ID_VIA,
1775 VIA_MVP4,
1776 "Via",
1777 "MVP4",
1778 via_generic_setup },
1779 { PCI_DEVICE_ID_VIA_82C597_0,
1780 PCI_VENDOR_ID_VIA,
1781 VIA_VP3,
1782 "Via",
1783 "VP3",
1784 via_generic_setup },
1785 { PCI_DEVICE_ID_VIA_82C598_0,
1786 PCI_VENDOR_ID_VIA,
1787 VIA_MVP3,
1788 "Via",
1789 "MVP3",
1790 via_generic_setup },
1791 { PCI_DEVICE_ID_VIA_82C691_0,
1792 PCI_VENDOR_ID_VIA,
1793 VIA_APOLLO_PRO,
1794 "Via",
1795 "Apollo Pro",
1796 via_generic_setup },
1797 { 0,
1798 PCI_VENDOR_ID_VIA,
1799 VIA_GENERIC,
1800 "Via",
1801 "Generic",
1802 via_generic_setup },
1803 #endif /* CONFIG_AGP_VIA */
1805 { 0, }, /* dummy final entry, always present */
1809 /* scan table above for supported devices */
1810 static int __init agp_lookup_host_bridge (struct pci_dev *pdev)
1812 int i;
1814 for (i = 0; i < arraysize (agp_bridge_info); i++)
1815 if (pdev->vendor == agp_bridge_info[i].vendor_id)
1816 break;
1818 if (i >= arraysize (agp_bridge_info)) {
1819 printk (KERN_DEBUG PFX "unsupported bridge\n");
1820 return -ENODEV;
1823 while ((i < arraysize (agp_bridge_info)) &&
1824 (agp_bridge_info[i].vendor_id == pdev->vendor)) {
1825 if (pdev->device == agp_bridge_info[i].device_id) {
1826 printk (KERN_INFO PFX "Detected %s %s chipset\n",
1827 agp_bridge_info[i].vendor_name,
1828 agp_bridge_info[i].chipset_name);
1829 agp_bridge.type = agp_bridge_info[i].chipset;
1830 return agp_bridge_info[i].chipset_setup (pdev);
1833 i++;
1836 i--; /* point to vendor generic entry (device_id == 0) */
1838 /* try init anyway, if user requests it AND
1839 * there is a 'generic' bridge entry for this vendor */
1840 if (agp_try_unsupported && agp_bridge_info[i].device_id == 0) {
1841 printk(KERN_WARNING PFX "Trying generic %s routines"
1842 " for device id: %x\n",
1843 agp_bridge_info[i].vendor_name, pdev->device);
1844 agp_bridge.type = agp_bridge_info[i].chipset;
1845 return agp_bridge_info[i].chipset_setup (pdev);
1848 printk(KERN_ERR PFX "Unsupported %s chipset,"
1849 " you might want to try "
1850 "agp_try_unsupported=1.\n",
1851 agp_bridge_info[i].vendor_name);
1852 return -ENODEV;
1856 /* Supported Device Scanning routine */
1858 static int __init agp_find_supported_device(void)
1860 struct pci_dev *dev = NULL;
1861 u8 cap_ptr = 0x00;
1862 u32 cap_id, scratch;
1864 if ((dev = pci_find_class(PCI_CLASS_BRIDGE_HOST << 8, NULL)) == NULL)
1865 return -ENODEV;
1867 agp_bridge.dev = dev;
1869 /* Need to test for I810 here */
1870 #ifdef CONFIG_AGP_I810
1871 if (dev->vendor == PCI_VENDOR_ID_INTEL) {
1872 struct pci_dev *i810_dev;
1874 switch (dev->device) {
1875 case PCI_DEVICE_ID_INTEL_810_0:
1876 i810_dev = pci_find_device(PCI_VENDOR_ID_INTEL,
1877 PCI_DEVICE_ID_INTEL_810_1,
1878 NULL);
1879 if (i810_dev == NULL) {
1880 printk(KERN_ERR PFX "Detected an Intel i810,"
1881 " but could not find the secondary"
1882 " device.\n");
1883 return -ENODEV;
1885 printk(KERN_INFO PFX "Detected an Intel "
1886 "i810 Chipset.\n");
1887 agp_bridge.type = INTEL_I810;
1888 return intel_i810_setup (i810_dev);
1890 case PCI_DEVICE_ID_INTEL_810_DC100_0:
1891 i810_dev = pci_find_device(PCI_VENDOR_ID_INTEL,
1892 PCI_DEVICE_ID_INTEL_810_DC100_1,
1893 NULL);
1894 if (i810_dev == NULL) {
1895 printk(KERN_ERR PFX "Detected an Intel i810 "
1896 "DC100, but could not find the "
1897 "secondary device.\n");
1898 return -ENODEV;
1900 printk(KERN_INFO PFX "Detected an Intel i810 "
1901 "DC100 Chipset.\n");
1902 agp_bridge.type = INTEL_I810;
1903 return intel_i810_setup(i810_dev);
1905 case PCI_DEVICE_ID_INTEL_810_E_0:
1906 i810_dev = pci_find_device(PCI_VENDOR_ID_INTEL,
1907 PCI_DEVICE_ID_INTEL_810_E_1,
1908 NULL);
1909 if (i810_dev == NULL) {
1910 printk(KERN_ERR PFX "Detected an Intel i810 E"
1911 ", but could not find the secondary "
1912 "device.\n");
1913 return -ENODEV;
1915 printk(KERN_INFO PFX "Detected an Intel i810 E "
1916 "Chipset.\n");
1917 agp_bridge.type = INTEL_I810;
1918 return intel_i810_setup(i810_dev);
1920 default:
1921 break;
1924 #endif /* CONFIG_AGP_I810 */
1926 /* find capndx */
1927 pci_read_config_dword(dev, 0x04, &scratch);
1928 if (!(scratch & 0x00100000))
1929 return -ENODEV;
1931 pci_read_config_byte(dev, 0x34, &cap_ptr);
1932 if (cap_ptr != 0x00) {
1933 do {
1934 pci_read_config_dword(dev, cap_ptr, &cap_id);
1936 if ((cap_id & 0xff) != 0x02)
1937 cap_ptr = (cap_id >> 8) & 0xff;
1939 while (((cap_id & 0xff) != 0x02) && (cap_ptr != 0x00));
1941 if (cap_ptr == 0x00)
1942 return -ENODEV;
1943 agp_bridge.capndx = cap_ptr;
1945 /* Fill in the mode register */
1946 pci_read_config_dword(agp_bridge.dev,
1947 agp_bridge.capndx + 4,
1948 &agp_bridge.mode);
1950 /* probe for known chipsets */
1951 return agp_lookup_host_bridge (dev);
1954 struct agp_max_table {
1955 int mem;
1956 int agp;
1959 static struct agp_max_table maxes_table[9] __initdata =
1961 {0, 0},
1962 {32, 4},
1963 {64, 28},
1964 {128, 96},
1965 {256, 204},
1966 {512, 440},
1967 {1024, 942},
1968 {2048, 1920},
1969 {4096, 3932}
1972 static int __init agp_find_max (void)
1974 long memory, index, result;
1976 memory = virt_to_phys(high_memory) >> 20;
1977 index = 1;
1979 while ((memory > maxes_table[index].mem) &&
1980 (index < 8)) {
1981 index++;
1984 result = maxes_table[index - 1].agp +
1985 ( (memory - maxes_table[index - 1].mem) *
1986 (maxes_table[index].agp - maxes_table[index - 1].agp)) /
1987 (maxes_table[index].mem - maxes_table[index - 1].mem);
1989 printk(KERN_INFO PFX "Maximum main memory to use "
1990 "for agp memory: %ldM\n", result);
1991 result = result << (20 - PAGE_SHIFT);
1992 return result;
1995 #define AGPGART_VERSION_MAJOR 0
1996 #define AGPGART_VERSION_MINOR 99
1998 static agp_version agp_current_version =
2000 AGPGART_VERSION_MAJOR,
2001 AGPGART_VERSION_MINOR
2004 static int __init agp_backend_initialize(void)
2006 int size_value, rc, got_gatt=0, got_keylist=0;
2008 memset(&agp_bridge, 0, sizeof(struct agp_bridge_data));
2009 agp_bridge.type = NOT_SUPPORTED;
2010 agp_bridge.max_memory_agp = agp_find_max();
2011 agp_bridge.version = &agp_current_version;
2013 rc = agp_find_supported_device();
2014 if (rc) {
2015 /* not KERN_ERR because error msg should have already printed */
2016 printk(KERN_DEBUG PFX "no supported devices found.\n");
2017 return rc;
2020 if (agp_bridge.needs_scratch_page == TRUE) {
2021 agp_bridge.scratch_page = agp_alloc_page();
2023 if (agp_bridge.scratch_page == 0) {
2024 printk(KERN_ERR PFX "unable to get memory for "
2025 "scratch page.\n");
2026 return -ENOMEM;
2028 agp_bridge.scratch_page =
2029 virt_to_phys((void *) agp_bridge.scratch_page);
2030 agp_bridge.scratch_page =
2031 agp_bridge.mask_memory(agp_bridge.scratch_page, 0);
2034 size_value = agp_bridge.fetch_size();
2036 if (size_value == 0) {
2037 printk(KERN_ERR PFX "unable to detrimine aperture size.\n");
2038 rc = -EINVAL;
2039 goto err_out;
2041 if (agp_bridge.create_gatt_table()) {
2042 printk(KERN_ERR PFX "unable to get memory for graphics "
2043 "translation table.\n");
2044 rc = -ENOMEM;
2045 goto err_out;
2047 got_gatt = 1;
2049 agp_bridge.key_list = vmalloc(PAGE_SIZE * 4);
2050 if (agp_bridge.key_list == NULL) {
2051 printk(KERN_ERR PFX "error allocating memory for key lists.\n");
2052 rc = -ENOMEM;
2053 goto err_out;
2055 got_keylist = 1;
2057 /* FIXME vmalloc'd memory not guaranteed contiguous */
2058 memset(agp_bridge.key_list, 0, PAGE_SIZE * 4);
2060 if (agp_bridge.configure()) {
2061 printk(KERN_ERR PFX "error configuring host chipset.\n");
2062 rc = -EINVAL;
2063 goto err_out;
2066 printk(KERN_INFO PFX "AGP aperture is %dM @ 0x%lx\n",
2067 size_value, agp_bridge.gart_bus_addr);
2069 return 0;
2071 err_out:
2072 if (agp_bridge.needs_scratch_page == TRUE) {
2073 agp_bridge.scratch_page &= ~(0x00000fff);
2074 agp_destroy_page((unsigned long)
2075 phys_to_virt(agp_bridge.scratch_page));
2077 if (got_gatt)
2078 agp_bridge.free_gatt_table();
2079 if (got_keylist)
2080 vfree(agp_bridge.key_list);
2081 return rc;
2085 /* cannot be __exit b/c as it could be called from __init code */
2086 static void agp_backend_cleanup(void)
2088 agp_bridge.cleanup();
2089 agp_bridge.free_gatt_table();
2090 vfree(agp_bridge.key_list);
2092 if (agp_bridge.needs_scratch_page == TRUE) {
2093 agp_bridge.scratch_page &= ~(0x00000fff);
2094 agp_destroy_page((unsigned long)
2095 phys_to_virt(agp_bridge.scratch_page));
2099 extern int agp_frontend_initialize(void);
2100 extern void agp_frontend_cleanup(void);
2102 static int __init agp_init(void)
2104 int ret_val;
2106 printk(KERN_INFO "Linux agpgart interface v%d.%d (c) Jeff Hartmann\n",
2107 AGPGART_VERSION_MAJOR, AGPGART_VERSION_MINOR);
2109 ret_val = agp_backend_initialize();
2110 if (ret_val)
2111 return ret_val;
2113 ret_val = agp_frontend_initialize();
2114 if (ret_val) {
2115 agp_backend_cleanup();
2116 return ret_val;
2119 return 0;
2122 static void __exit agp_cleanup(void)
2124 agp_frontend_cleanup();
2125 agp_backend_cleanup();
2128 module_init(agp_init);
2129 module_exit(agp_cleanup);