2 * AGPGART module version 0.99
3 * Copyright (C) 1999 Jeff Hartmann
4 * Copyright (C) 1999 Precision Insight, Inc.
5 * Copyright (C) 1999 Xi Graphics, Inc.
7 * Permission is hereby granted, free of charge, to any person obtaining a
8 * copy of this software and associated documentation files (the "Software"),
9 * to deal in the Software without restriction, including without limitation
10 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
11 * and/or sell copies of the Software, and to permit persons to whom the
12 * Software is furnished to do so, subject to the following conditions:
14 * The above copyright notice and this permission notice shall be included
15 * in all copies or substantial portions of the Software.
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
18 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * JEFF HARTMANN, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM,
21 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
22 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
23 * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26 #include <linux/config.h>
27 #include <linux/version.h>
28 #include <linux/module.h>
29 #include <linux/types.h>
30 #include <linux/kernel.h>
31 #include <linux/sched.h>
33 #include <linux/string.h>
34 #include <linux/errno.h>
35 #include <linux/malloc.h>
36 #include <linux/vmalloc.h>
37 #include <linux/pci.h>
38 #include <linux/init.h>
39 #include <linux/pagemap.h>
40 #include <linux/miscdevice.h>
41 #include <asm/system.h>
42 #include <asm/uaccess.h>
46 #include <linux/agp_backend.h>
49 MODULE_AUTHOR("Jeff Hartmann <jhartmann@precisioninsight.com>");
50 MODULE_PARM(agp_try_unsupported
, "1i");
51 EXPORT_SYMBOL(agp_free_memory
);
52 EXPORT_SYMBOL(agp_allocate_memory
);
53 EXPORT_SYMBOL(agp_copy_info
);
54 EXPORT_SYMBOL(agp_bind_memory
);
55 EXPORT_SYMBOL(agp_unbind_memory
);
56 EXPORT_SYMBOL(agp_enable
);
57 EXPORT_SYMBOL(agp_backend_acquire
);
58 EXPORT_SYMBOL(agp_backend_release
);
60 static void flush_cache(void);
62 static struct agp_bridge_data agp_bridge
;
63 static int agp_try_unsupported __initdata
= 0;
66 static inline void flush_cache(void)
69 asm volatile ("wbinvd":::"memory");
70 #elif defined(__alpha__) || defined(__ia64__)
71 /* ??? I wonder if we'll really need to flush caches, or if the
72 core logic can manage to keep the system coherent. The ARM
73 speaks only of using `cflush' to get things in memory in
74 preparation for power failure.
76 If we do need to call `cflush', we'll need a target page,
77 as we can only flush one page at a time.
79 Ditto for IA-64. --davidm 00/08/07 */
82 #error "Please define flush_cache."
87 static atomic_t cpus_waiting
;
89 static void ipi_handler(void *null
)
92 atomic_dec(&cpus_waiting
);
93 while (atomic_read(&cpus_waiting
) > 0)
97 static void smp_flush_cache(void)
99 atomic_set(&cpus_waiting
, smp_num_cpus
- 1);
100 if (smp_call_function(ipi_handler
, NULL
, 1, 0) != 0)
101 panic(PFX
"timed out waiting for the other CPUs!\n");
103 while (atomic_read(&cpus_waiting
) > 0)
106 #define global_cache_flush smp_flush_cache
107 #else /* CONFIG_SMP */
108 #define global_cache_flush flush_cache
109 #endif /* CONFIG_SMP */
111 int agp_backend_acquire(void)
113 if (agp_bridge
.type
== NOT_SUPPORTED
) {
116 atomic_inc(&agp_bridge
.agp_in_use
);
118 if (atomic_read(&agp_bridge
.agp_in_use
) != 1) {
119 atomic_dec(&agp_bridge
.agp_in_use
);
126 void agp_backend_release(void)
128 if (agp_bridge
.type
== NOT_SUPPORTED
) {
131 atomic_dec(&agp_bridge
.agp_in_use
);
136 * Basic Page Allocation Routines -
137 * These routines handle page allocation
138 * and by default they reserve the allocated
139 * memory. They also handle incrementing the
140 * current_memory_agp value, Which is checked
141 * against a maximum value.
144 static unsigned long agp_alloc_page(void)
148 pt
= (void *) __get_free_page(GFP_KERNEL
);
152 atomic_inc(&virt_to_page(pt
)->count
);
153 set_bit(PG_locked
, &virt_to_page(pt
)->flags
);
154 atomic_inc(&agp_bridge
.current_memory_agp
);
155 return (unsigned long) pt
;
158 static void agp_destroy_page(unsigned long page
)
160 void *pt
= (void *) page
;
165 atomic_dec(&virt_to_page(pt
)->count
);
166 clear_bit(PG_locked
, &virt_to_page(pt
)->flags
);
167 wake_up(&virt_to_page(pt
)->wait
);
168 free_page((unsigned long) pt
);
169 atomic_dec(&agp_bridge
.current_memory_agp
);
172 /* End Basic Page Allocation Routines */
175 * Generic routines for handling agp_memory structures -
176 * They use the basic page allocation routines to do the
181 static void agp_free_key(int key
)
188 clear_bit(key
, agp_bridge
.key_list
);
192 static int agp_get_key(void)
196 bit
= find_first_zero_bit(agp_bridge
.key_list
, MAXKEY
);
198 set_bit(bit
, agp_bridge
.key_list
);
204 static agp_memory
*agp_create_memory(int scratch_pages
)
208 new = kmalloc(sizeof(agp_memory
), GFP_KERNEL
);
213 memset(new, 0, sizeof(agp_memory
));
214 new->key
= agp_get_key();
220 new->memory
= vmalloc(PAGE_SIZE
* scratch_pages
);
222 if (new->memory
== NULL
) {
223 agp_free_key(new->key
);
227 new->num_scratch_pages
= scratch_pages
;
231 void agp_free_memory(agp_memory
* curr
)
235 if ((agp_bridge
.type
== NOT_SUPPORTED
) || (curr
== NULL
)) {
238 if (curr
->is_bound
== TRUE
) {
239 agp_unbind_memory(curr
);
241 if (curr
->type
!= 0) {
242 agp_bridge
.free_by_type(curr
);
245 if (curr
->page_count
!= 0) {
246 for (i
= 0; i
< curr
->page_count
; i
++) {
247 curr
->memory
[i
] &= ~(0x00000fff);
248 agp_destroy_page((unsigned long)
249 phys_to_virt(curr
->memory
[i
]));
252 agp_free_key(curr
->key
);
258 #define ENTRIES_PER_PAGE (PAGE_SIZE / sizeof(unsigned long))
260 agp_memory
*agp_allocate_memory(size_t page_count
, u32 type
)
266 if (agp_bridge
.type
== NOT_SUPPORTED
) {
269 if ((atomic_read(&agp_bridge
.current_memory_agp
) + page_count
) >
270 agp_bridge
.max_memory_agp
) {
275 new = agp_bridge
.alloc_by_type(page_count
, type
);
278 /* We always increase the module count, since free auto-decrements
284 scratch_pages
= (page_count
+ ENTRIES_PER_PAGE
- 1) / ENTRIES_PER_PAGE
;
286 new = agp_create_memory(scratch_pages
);
292 for (i
= 0; i
< page_count
; i
++) {
293 new->memory
[i
] = agp_alloc_page();
295 if (new->memory
[i
] == 0) {
296 /* Free this structure */
297 agp_free_memory(new);
301 agp_bridge
.mask_memory(
302 virt_to_phys((void *) new->memory
[i
]),
310 /* End - Generic routines for handling agp_memory structures */
312 static int agp_return_size(void)
317 temp
= agp_bridge
.current_size
;
319 switch (agp_bridge
.size_type
) {
321 current_size
= A_SIZE_8(temp
)->size
;
324 current_size
= A_SIZE_16(temp
)->size
;
327 current_size
= A_SIZE_32(temp
)->size
;
330 current_size
= A_SIZE_LVL2(temp
)->size
;
332 case FIXED_APER_SIZE
:
333 current_size
= A_SIZE_FIX(temp
)->size
;
343 /* Routine to copy over information structure */
345 void agp_copy_info(agp_kern_info
* info
)
347 memset(info
, 0, sizeof(agp_kern_info
));
348 if (agp_bridge
.type
== NOT_SUPPORTED
) {
349 info
->chipset
= agp_bridge
.type
;
352 info
->version
.major
= agp_bridge
.version
->major
;
353 info
->version
.minor
= agp_bridge
.version
->minor
;
354 info
->device
= agp_bridge
.dev
;
355 info
->chipset
= agp_bridge
.type
;
356 info
->mode
= agp_bridge
.mode
;
357 info
->aper_base
= agp_bridge
.gart_bus_addr
;
358 info
->aper_size
= agp_return_size();
359 info
->max_memory
= agp_bridge
.max_memory_agp
;
360 info
->current_memory
= atomic_read(&agp_bridge
.current_memory_agp
);
363 /* End - Routine to copy over information structure */
366 * Routines for handling swapping of agp_memory into the GATT -
367 * These routines take agp_memory and insert them into the GATT.
368 * They call device specific routines to actually write to the GATT.
371 int agp_bind_memory(agp_memory
* curr
, off_t pg_start
)
375 if ((agp_bridge
.type
== NOT_SUPPORTED
) ||
376 (curr
== NULL
) || (curr
->is_bound
== TRUE
)) {
379 if (curr
->is_flushed
== FALSE
) {
381 curr
->is_flushed
= TRUE
;
383 ret_val
= agp_bridge
.insert_memory(curr
, pg_start
, curr
->type
);
388 curr
->is_bound
= TRUE
;
389 curr
->pg_start
= pg_start
;
393 int agp_unbind_memory(agp_memory
* curr
)
397 if ((agp_bridge
.type
== NOT_SUPPORTED
) || (curr
== NULL
)) {
400 if (curr
->is_bound
!= TRUE
) {
403 ret_val
= agp_bridge
.remove_memory(curr
, curr
->pg_start
, curr
->type
);
408 curr
->is_bound
= FALSE
;
413 /* End - Routines for handling swapping of agp_memory into the GATT */
416 * Driver routines - start
417 * Currently this module supports the following chipsets:
418 * i810, 440lx, 440bx, 440gx, via vp3, via mvp3, via kx133, via kt133,
419 * amd irongate, ALi M1541, and generic support for the SiS chipsets.
422 /* Generic Agp routines - Start */
424 static void agp_generic_agp_enable(u32 mode
)
426 struct pci_dev
*device
= NULL
;
427 u32 command
, scratch
, cap_id
;
430 pci_read_config_dword(agp_bridge
.dev
,
431 agp_bridge
.capndx
+ 4,
435 * PASS1: go throu all devices that claim to be
436 * AGP devices and collect their data.
439 while ((device
= pci_find_class(PCI_CLASS_DISPLAY_VGA
<< 8,
441 pci_read_config_dword(device
, 0x04, &scratch
);
443 if (!(scratch
& 0x00100000))
446 pci_read_config_byte(device
, 0x34, &cap_ptr
);
448 if (cap_ptr
!= 0x00) {
450 pci_read_config_dword(device
,
453 if ((cap_id
& 0xff) != 0x02)
454 cap_ptr
= (cap_id
>> 8) & 0xff;
456 while (((cap_id
& 0xff) != 0x02) && (cap_ptr
!= 0x00));
458 if (cap_ptr
!= 0x00) {
460 * Ok, here we have a AGP device. Disable impossible
461 * settings, and adjust the readqueue to the minimum.
464 pci_read_config_dword(device
, cap_ptr
+ 4, &scratch
);
466 /* adjust RQ depth */
468 ((command
& ~0xff000000) |
469 min((mode
& 0xff000000),
470 min((command
& 0xff000000),
471 (scratch
& 0xff000000))));
473 /* disable SBA if it's not supported */
474 if (!((command
& 0x00000200) &&
475 (scratch
& 0x00000200) &&
476 (mode
& 0x00000200)))
477 command
&= ~0x00000200;
479 /* disable FW if it's not supported */
480 if (!((command
& 0x00000010) &&
481 (scratch
& 0x00000010) &&
482 (mode
& 0x00000010)))
483 command
&= ~0x00000010;
485 if (!((command
& 4) &&
488 command
&= ~0x00000004;
490 if (!((command
& 2) &&
493 command
&= ~0x00000002;
495 if (!((command
& 1) &&
498 command
&= ~0x00000001;
502 * PASS2: Figure out the 4X/2X/1X setting and enable the
503 * target (our motherboard chipset).
507 command
&= ~3; /* 4X */
510 command
&= ~5; /* 2X */
513 command
&= ~6; /* 1X */
515 command
|= 0x00000100;
517 pci_write_config_dword(agp_bridge
.dev
,
518 agp_bridge
.capndx
+ 8,
522 * PASS3: Go throu all AGP devices and update the
526 while ((device
= pci_find_class(PCI_CLASS_DISPLAY_VGA
<< 8,
528 pci_read_config_dword(device
, 0x04, &scratch
);
530 if (!(scratch
& 0x00100000))
533 pci_read_config_byte(device
, 0x34, &cap_ptr
);
535 if (cap_ptr
!= 0x00) {
537 pci_read_config_dword(device
,
540 if ((cap_id
& 0xff) != 0x02)
541 cap_ptr
= (cap_id
>> 8) & 0xff;
543 while (((cap_id
& 0xff) != 0x02) && (cap_ptr
!= 0x00));
546 pci_write_config_dword(device
, cap_ptr
+ 8, command
);
550 static int agp_generic_create_gatt_table(void)
561 /* The generic routines can't handle 2 level gatt's */
562 if (agp_bridge
.size_type
== LVL2_APER_SIZE
) {
567 i
= agp_bridge
.aperture_size_idx
;
568 temp
= agp_bridge
.current_size
;
569 size
= page_order
= num_entries
= 0;
571 if (agp_bridge
.size_type
!= FIXED_APER_SIZE
) {
573 switch (agp_bridge
.size_type
) {
575 size
= A_SIZE_8(temp
)->size
;
577 A_SIZE_8(temp
)->page_order
;
579 A_SIZE_8(temp
)->num_entries
;
582 size
= A_SIZE_16(temp
)->size
;
583 page_order
= A_SIZE_16(temp
)->page_order
;
584 num_entries
= A_SIZE_16(temp
)->num_entries
;
587 size
= A_SIZE_32(temp
)->size
;
588 page_order
= A_SIZE_32(temp
)->page_order
;
589 num_entries
= A_SIZE_32(temp
)->num_entries
;
591 /* This case will never really happen. */
592 case FIXED_APER_SIZE
:
595 size
= page_order
= num_entries
= 0;
599 table
= (char *) __get_free_pages(GFP_KERNEL
,
604 switch (agp_bridge
.size_type
) {
606 agp_bridge
.current_size
= A_IDX8();
609 agp_bridge
.current_size
= A_IDX16();
612 agp_bridge
.current_size
= A_IDX32();
614 /* This case will never really
617 case FIXED_APER_SIZE
:
620 agp_bridge
.current_size
=
621 agp_bridge
.current_size
;
625 agp_bridge
.aperture_size_idx
= i
;
627 } while ((table
== NULL
) &&
628 (i
< agp_bridge
.num_aperture_sizes
));
630 size
= ((aper_size_info_fixed
*) temp
)->size
;
631 page_order
= ((aper_size_info_fixed
*) temp
)->page_order
;
632 num_entries
= ((aper_size_info_fixed
*) temp
)->num_entries
;
633 table
= (char *) __get_free_pages(GFP_KERNEL
, page_order
);
639 table_end
= table
+ ((PAGE_SIZE
* (1 << page_order
)) - 1);
641 for (page
= virt_to_page(table
); page
<= virt_to_page(table_end
); page
++)
642 set_bit(PG_reserved
, &page
->flags
);
644 agp_bridge
.gatt_table_real
= (unsigned long *) table
;
646 agp_bridge
.gatt_table
= ioremap_nocache(virt_to_phys(table
),
647 (PAGE_SIZE
* (1 << page_order
)));
650 if (agp_bridge
.gatt_table
== NULL
) {
651 for (page
= virt_to_page(table
); page
<= virt_to_page(table_end
); page
++)
652 clear_bit(PG_reserved
, &page
->flags
);
654 free_pages((unsigned long) table
, page_order
);
658 agp_bridge
.gatt_bus_addr
= virt_to_phys(agp_bridge
.gatt_table_real
);
660 for (i
= 0; i
< num_entries
; i
++) {
661 agp_bridge
.gatt_table
[i
] =
662 (unsigned long) agp_bridge
.scratch_page
;
668 static int agp_generic_free_gatt_table(void)
671 char *table
, *table_end
;
675 temp
= agp_bridge
.current_size
;
677 switch (agp_bridge
.size_type
) {
679 page_order
= A_SIZE_8(temp
)->page_order
;
682 page_order
= A_SIZE_16(temp
)->page_order
;
685 page_order
= A_SIZE_32(temp
)->page_order
;
687 case FIXED_APER_SIZE
:
688 page_order
= A_SIZE_FIX(temp
)->page_order
;
691 /* The generic routines can't deal with 2 level gatt's */
699 /* Do not worry about freeing memory, because if this is
700 * called, then all agp memory is deallocated and removed
704 iounmap(agp_bridge
.gatt_table
);
705 table
= (char *) agp_bridge
.gatt_table_real
;
706 table_end
= table
+ ((PAGE_SIZE
* (1 << page_order
)) - 1);
708 for (page
= virt_to_page(table
); page
<= virt_to_page(table_end
); page
++)
709 clear_bit(PG_reserved
, &page
->flags
);
711 free_pages((unsigned long) agp_bridge
.gatt_table_real
, page_order
);
715 static int agp_generic_insert_memory(agp_memory
* mem
,
716 off_t pg_start
, int type
)
718 int i
, j
, num_entries
;
721 temp
= agp_bridge
.current_size
;
723 switch (agp_bridge
.size_type
) {
725 num_entries
= A_SIZE_8(temp
)->num_entries
;
728 num_entries
= A_SIZE_16(temp
)->num_entries
;
731 num_entries
= A_SIZE_32(temp
)->num_entries
;
733 case FIXED_APER_SIZE
:
734 num_entries
= A_SIZE_FIX(temp
)->num_entries
;
737 /* The generic routines can't deal with 2 level gatt's */
745 if (type
!= 0 || mem
->type
!= 0) {
746 /* The generic routines know nothing of memory types */
749 if ((pg_start
+ mem
->page_count
) > num_entries
) {
754 while (j
< (pg_start
+ mem
->page_count
)) {
755 if (!PGE_EMPTY(agp_bridge
.gatt_table
[j
])) {
761 if (mem
->is_flushed
== FALSE
) {
763 mem
->is_flushed
= TRUE
;
765 for (i
= 0, j
= pg_start
; i
< mem
->page_count
; i
++, j
++) {
766 agp_bridge
.gatt_table
[j
] = mem
->memory
[i
];
769 agp_bridge
.tlb_flush(mem
);
773 static int agp_generic_remove_memory(agp_memory
* mem
, off_t pg_start
,
778 if (type
!= 0 || mem
->type
!= 0) {
779 /* The generic routines know nothing of memory types */
782 for (i
= pg_start
; i
< (mem
->page_count
+ pg_start
); i
++) {
783 agp_bridge
.gatt_table
[i
] =
784 (unsigned long) agp_bridge
.scratch_page
;
787 agp_bridge
.tlb_flush(mem
);
791 static agp_memory
*agp_generic_alloc_by_type(size_t page_count
, int type
)
796 static void agp_generic_free_by_type(agp_memory
* curr
)
798 if (curr
->memory
!= NULL
) {
801 agp_free_key(curr
->key
);
805 void agp_enable(u32 mode
)
807 if (agp_bridge
.type
== NOT_SUPPORTED
) return;
808 agp_bridge
.agp_enable(mode
);
811 /* End - Generic Agp routines */
813 #ifdef CONFIG_AGP_I810
814 static aper_size_info_fixed intel_i810_sizes
[] =
817 /* The 32M mode still requires a 64k gatt */
821 #define AGP_DCACHE_MEMORY 1
822 #define AGP_PHYS_MEMORY 2
824 static gatt_mask intel_i810_masks
[] =
827 {(I810_PTE_VALID
| I810_PTE_LOCAL
), AGP_DCACHE_MEMORY
},
831 static struct _intel_i810_private
{
832 struct pci_dev
*i810_dev
; /* device one */
833 volatile u8
*registers
;
834 int num_dcache_entries
;
835 } intel_i810_private
;
837 static int intel_i810_fetch_size(void)
840 aper_size_info_fixed
*values
;
842 pci_read_config_dword(agp_bridge
.dev
, I810_SMRAM_MISCC
, &smram_miscc
);
843 values
= A_SIZE_FIX(agp_bridge
.aperture_sizes
);
845 if ((smram_miscc
& I810_GMS
) == I810_GMS_DISABLE
) {
846 printk(KERN_WARNING PFX
"i810 is disabled\n");
849 if ((smram_miscc
& I810_GFX_MEM_WIN_SIZE
) == I810_GFX_MEM_WIN_32M
) {
850 agp_bridge
.previous_size
=
851 agp_bridge
.current_size
= (void *) (values
+ 1);
852 agp_bridge
.aperture_size_idx
= 1;
853 return values
[1].size
;
855 agp_bridge
.previous_size
=
856 agp_bridge
.current_size
= (void *) (values
);
857 agp_bridge
.aperture_size_idx
= 0;
858 return values
[0].size
;
864 static int intel_i810_configure(void)
866 aper_size_info_fixed
*current_size
;
870 current_size
= A_SIZE_FIX(agp_bridge
.current_size
);
872 pci_read_config_dword(intel_i810_private
.i810_dev
, I810_MMADDR
, &temp
);
875 intel_i810_private
.registers
=
876 (volatile u8
*) ioremap(temp
, 128 * 4096);
878 if ((INREG32(intel_i810_private
.registers
, I810_DRAM_CTL
)
879 & I810_DRAM_ROW_0
) == I810_DRAM_ROW_0_SDRAM
) {
880 /* This will need to be dynamically assigned */
881 printk(KERN_INFO PFX
"detected 4MB dedicated video ram.\n");
882 intel_i810_private
.num_dcache_entries
= 1024;
884 pci_read_config_dword(intel_i810_private
.i810_dev
, I810_GMADDR
, &temp
);
885 agp_bridge
.gart_bus_addr
= (temp
& PCI_BASE_ADDRESS_MEM_MASK
);
886 OUTREG32(intel_i810_private
.registers
, I810_PGETBL_CTL
,
887 agp_bridge
.gatt_bus_addr
| I810_PGETBL_ENABLED
);
890 if (agp_bridge
.needs_scratch_page
== TRUE
) {
891 for (i
= 0; i
< current_size
->num_entries
; i
++) {
892 OUTREG32(intel_i810_private
.registers
,
893 I810_PTE_BASE
+ (i
* 4),
894 agp_bridge
.scratch_page
);
900 static void intel_i810_cleanup(void)
902 OUTREG32(intel_i810_private
.registers
, I810_PGETBL_CTL
, 0);
903 iounmap((void *) intel_i810_private
.registers
);
906 static void intel_i810_tlbflush(agp_memory
* mem
)
911 static void intel_i810_agp_enable(u32 mode
)
916 static int intel_i810_insert_entries(agp_memory
* mem
, off_t pg_start
,
919 int i
, j
, num_entries
;
922 temp
= agp_bridge
.current_size
;
923 num_entries
= A_SIZE_FIX(temp
)->num_entries
;
925 if ((pg_start
+ mem
->page_count
) > num_entries
) {
928 for (j
= pg_start
; j
< (pg_start
+ mem
->page_count
); j
++) {
929 if (!PGE_EMPTY(agp_bridge
.gatt_table
[j
])) {
934 if (type
!= 0 || mem
->type
!= 0) {
935 if ((type
== AGP_DCACHE_MEMORY
) &&
936 (mem
->type
== AGP_DCACHE_MEMORY
)) {
940 i
< (pg_start
+ mem
->page_count
); i
++) {
941 OUTREG32(intel_i810_private
.registers
,
942 I810_PTE_BASE
+ (i
* 4),
943 (i
* 4096) | I810_PTE_LOCAL
|
947 agp_bridge
.tlb_flush(mem
);
950 if((type
== AGP_PHYS_MEMORY
) &&
951 (mem
->type
== AGP_PHYS_MEMORY
)) {
959 for (i
= 0, j
= pg_start
; i
< mem
->page_count
; i
++, j
++) {
960 OUTREG32(intel_i810_private
.registers
,
961 I810_PTE_BASE
+ (j
* 4), mem
->memory
[i
]);
965 agp_bridge
.tlb_flush(mem
);
969 static int intel_i810_remove_entries(agp_memory
* mem
, off_t pg_start
,
974 for (i
= pg_start
; i
< (mem
->page_count
+ pg_start
); i
++) {
975 OUTREG32(intel_i810_private
.registers
,
976 I810_PTE_BASE
+ (i
* 4),
977 agp_bridge
.scratch_page
);
981 agp_bridge
.tlb_flush(mem
);
985 static agp_memory
*intel_i810_alloc_by_type(size_t pg_count
, int type
)
989 if (type
== AGP_DCACHE_MEMORY
) {
990 if (pg_count
!= intel_i810_private
.num_dcache_entries
) {
993 new = agp_create_memory(1);
998 new->type
= AGP_DCACHE_MEMORY
;
999 new->page_count
= pg_count
;
1000 new->num_scratch_pages
= 0;
1005 if(type
== AGP_PHYS_MEMORY
) {
1006 /* The I810 requires a physical address to program
1007 * it's mouse pointer into hardware. However the
1008 * Xserver still writes to it through the agp
1011 if (pg_count
!= 1) {
1014 new = agp_create_memory(1);
1020 new->memory
[0] = agp_alloc_page();
1022 if (new->memory
[0] == 0) {
1023 /* Free this structure */
1024 agp_free_memory(new);
1028 agp_bridge
.mask_memory(
1029 virt_to_phys((void *) new->memory
[0]),
1031 new->page_count
= 1;
1032 new->num_scratch_pages
= 1;
1033 new->type
= AGP_PHYS_MEMORY
;
1034 new->physical
= virt_to_phys((void *) new->memory
[0]);
1041 static void intel_i810_free_by_type(agp_memory
* curr
)
1043 agp_free_key(curr
->key
);
1044 if(curr
->type
== AGP_PHYS_MEMORY
) {
1045 agp_destroy_page((unsigned long)
1046 phys_to_virt(curr
->memory
[0]));
1047 vfree(curr
->memory
);
1053 static unsigned long intel_i810_mask_memory(unsigned long addr
, int type
)
1055 /* Type checking must be done elsewhere */
1056 return addr
| agp_bridge
.masks
[type
].mask
;
1059 static int __init
intel_i810_setup(struct pci_dev
*i810_dev
)
1061 intel_i810_private
.i810_dev
= i810_dev
;
1063 agp_bridge
.masks
= intel_i810_masks
;
1064 agp_bridge
.num_of_masks
= 2;
1065 agp_bridge
.aperture_sizes
= (void *) intel_i810_sizes
;
1066 agp_bridge
.size_type
= FIXED_APER_SIZE
;
1067 agp_bridge
.num_aperture_sizes
= 2;
1068 agp_bridge
.dev_private_data
= (void *) &intel_i810_private
;
1069 agp_bridge
.needs_scratch_page
= TRUE
;
1070 agp_bridge
.configure
= intel_i810_configure
;
1071 agp_bridge
.fetch_size
= intel_i810_fetch_size
;
1072 agp_bridge
.cleanup
= intel_i810_cleanup
;
1073 agp_bridge
.tlb_flush
= intel_i810_tlbflush
;
1074 agp_bridge
.mask_memory
= intel_i810_mask_memory
;
1075 agp_bridge
.agp_enable
= intel_i810_agp_enable
;
1076 agp_bridge
.cache_flush
= global_cache_flush
;
1077 agp_bridge
.create_gatt_table
= agp_generic_create_gatt_table
;
1078 agp_bridge
.free_gatt_table
= agp_generic_free_gatt_table
;
1079 agp_bridge
.insert_memory
= intel_i810_insert_entries
;
1080 agp_bridge
.remove_memory
= intel_i810_remove_entries
;
1081 agp_bridge
.alloc_by_type
= intel_i810_alloc_by_type
;
1082 agp_bridge
.free_by_type
= intel_i810_free_by_type
;
1087 #endif /* CONFIG_AGP_I810 */
1089 #ifdef CONFIG_AGP_INTEL
1091 static int intel_fetch_size(void)
1095 aper_size_info_16
*values
;
1097 pci_read_config_word(agp_bridge
.dev
, INTEL_APSIZE
, &temp
);
1098 values
= A_SIZE_16(agp_bridge
.aperture_sizes
);
1100 for (i
= 0; i
< agp_bridge
.num_aperture_sizes
; i
++) {
1101 if (temp
== values
[i
].size_value
) {
1102 agp_bridge
.previous_size
=
1103 agp_bridge
.current_size
= (void *) (values
+ i
);
1104 agp_bridge
.aperture_size_idx
= i
;
1105 return values
[i
].size
;
1112 static void intel_tlbflush(agp_memory
* mem
)
1114 pci_write_config_dword(agp_bridge
.dev
, INTEL_AGPCTRL
, 0x2200);
1115 pci_write_config_dword(agp_bridge
.dev
, INTEL_AGPCTRL
, 0x2280);
1118 static void intel_cleanup(void)
1121 aper_size_info_16
*previous_size
;
1123 previous_size
= A_SIZE_16(agp_bridge
.previous_size
);
1124 pci_read_config_word(agp_bridge
.dev
, INTEL_NBXCFG
, &temp
);
1125 pci_write_config_word(agp_bridge
.dev
, INTEL_NBXCFG
, temp
& ~(1 << 9));
1126 pci_write_config_word(agp_bridge
.dev
, INTEL_APSIZE
,
1127 previous_size
->size_value
);
1130 static int intel_configure(void)
1134 aper_size_info_16
*current_size
;
1136 current_size
= A_SIZE_16(agp_bridge
.current_size
);
1139 pci_write_config_word(agp_bridge
.dev
, INTEL_APSIZE
,
1140 current_size
->size_value
);
1142 /* address to map to */
1143 pci_read_config_dword(agp_bridge
.dev
, INTEL_APBASE
, &temp
);
1144 agp_bridge
.gart_bus_addr
= (temp
& PCI_BASE_ADDRESS_MEM_MASK
);
1146 /* attbase - aperture base */
1147 pci_write_config_dword(agp_bridge
.dev
, INTEL_ATTBASE
,
1148 agp_bridge
.gatt_bus_addr
);
1151 pci_write_config_dword(agp_bridge
.dev
, INTEL_AGPCTRL
, 0x2280);
1154 pci_read_config_word(agp_bridge
.dev
, INTEL_NBXCFG
, &temp2
);
1155 pci_write_config_word(agp_bridge
.dev
, INTEL_NBXCFG
,
1156 (temp2
& ~(1 << 10)) | (1 << 9));
1157 /* clear any possible error conditions */
1158 pci_write_config_byte(agp_bridge
.dev
, INTEL_ERRSTS
+ 1, 7);
1162 static int intel_840_configure(void)
1166 aper_size_info_16
*current_size
;
1168 current_size
= A_SIZE_16(agp_bridge
.current_size
);
1171 pci_write_config_byte(agp_bridge
.dev
, INTEL_APSIZE
,
1172 (char)current_size
->size_value
);
1174 /* address to map to */
1175 pci_read_config_dword(agp_bridge
.dev
, INTEL_APBASE
, &temp
);
1176 agp_bridge
.gart_bus_addr
= (temp
& PCI_BASE_ADDRESS_MEM_MASK
);
1178 /* attbase - aperture base */
1179 pci_write_config_dword(agp_bridge
.dev
, INTEL_ATTBASE
,
1180 agp_bridge
.gatt_bus_addr
);
1183 pci_write_config_dword(agp_bridge
.dev
, INTEL_AGPCTRL
, 0x0000);
1186 pci_read_config_word(agp_bridge
.dev
, INTEL_I840_MCHCFG
, &temp2
);
1187 pci_write_config_word(agp_bridge
.dev
, INTEL_I840_MCHCFG
,
1189 /* clear any possible error conditions */
1190 pci_write_config_word(agp_bridge
.dev
, INTEL_I840_ERRSTS
, 0xc000);
1194 static unsigned long intel_mask_memory(unsigned long addr
, int type
)
1196 /* Memory type is ignored */
1198 return addr
| agp_bridge
.masks
[0].mask
;
1202 /* Setup function */
1203 static gatt_mask intel_generic_masks
[] =
1208 static aper_size_info_16 intel_generic_sizes
[7] =
1211 {128, 32768, 5, 32},
1219 static int __init
intel_generic_setup (struct pci_dev
*pdev
)
1221 agp_bridge
.masks
= intel_generic_masks
;
1222 agp_bridge
.num_of_masks
= 1;
1223 agp_bridge
.aperture_sizes
= (void *) intel_generic_sizes
;
1224 agp_bridge
.size_type
= U16_APER_SIZE
;
1225 agp_bridge
.num_aperture_sizes
= 7;
1226 agp_bridge
.dev_private_data
= NULL
;
1227 agp_bridge
.needs_scratch_page
= FALSE
;
1228 agp_bridge
.configure
= intel_configure
;
1229 agp_bridge
.fetch_size
= intel_fetch_size
;
1230 agp_bridge
.cleanup
= intel_cleanup
;
1231 agp_bridge
.tlb_flush
= intel_tlbflush
;
1232 agp_bridge
.mask_memory
= intel_mask_memory
;
1233 agp_bridge
.agp_enable
= agp_generic_agp_enable
;
1234 agp_bridge
.cache_flush
= global_cache_flush
;
1235 agp_bridge
.create_gatt_table
= agp_generic_create_gatt_table
;
1236 agp_bridge
.free_gatt_table
= agp_generic_free_gatt_table
;
1237 agp_bridge
.insert_memory
= agp_generic_insert_memory
;
1238 agp_bridge
.remove_memory
= agp_generic_remove_memory
;
1239 agp_bridge
.alloc_by_type
= agp_generic_alloc_by_type
;
1240 agp_bridge
.free_by_type
= agp_generic_free_by_type
;
1244 (void) pdev
; /* unused */
1247 static int __init
intel_840_setup (struct pci_dev
*pdev
)
1249 agp_bridge
.masks
= intel_generic_masks
;
1250 agp_bridge
.num_of_masks
= 1;
1251 agp_bridge
.aperture_sizes
= (void *) intel_generic_sizes
;
1252 agp_bridge
.size_type
= U16_APER_SIZE
;
1253 agp_bridge
.num_aperture_sizes
= 7;
1254 agp_bridge
.dev_private_data
= NULL
;
1255 agp_bridge
.needs_scratch_page
= FALSE
;
1256 agp_bridge
.configure
= intel_840_configure
;
1257 agp_bridge
.fetch_size
= intel_fetch_size
;
1258 agp_bridge
.cleanup
= intel_cleanup
;
1259 agp_bridge
.tlb_flush
= intel_tlbflush
;
1260 agp_bridge
.mask_memory
= intel_mask_memory
;
1261 agp_bridge
.agp_enable
= agp_generic_agp_enable
;
1262 agp_bridge
.cache_flush
= global_cache_flush
;
1263 agp_bridge
.create_gatt_table
= agp_generic_create_gatt_table
;
1264 agp_bridge
.free_gatt_table
= agp_generic_free_gatt_table
;
1265 agp_bridge
.insert_memory
= agp_generic_insert_memory
;
1266 agp_bridge
.remove_memory
= agp_generic_remove_memory
;
1267 agp_bridge
.alloc_by_type
= agp_generic_alloc_by_type
;
1268 agp_bridge
.free_by_type
= agp_generic_free_by_type
;
1272 (void) pdev
; /* unused */
1275 #endif /* CONFIG_AGP_INTEL */
1277 #ifdef CONFIG_AGP_VIA
1279 static int via_fetch_size(void)
1283 aper_size_info_8
*values
;
1285 values
= A_SIZE_8(agp_bridge
.aperture_sizes
);
1286 pci_read_config_byte(agp_bridge
.dev
, VIA_APSIZE
, &temp
);
1287 for (i
= 0; i
< agp_bridge
.num_aperture_sizes
; i
++) {
1288 if (temp
== values
[i
].size_value
) {
1289 agp_bridge
.previous_size
=
1290 agp_bridge
.current_size
= (void *) (values
+ i
);
1291 agp_bridge
.aperture_size_idx
= i
;
1292 return values
[i
].size
;
1299 static int via_configure(void)
1302 aper_size_info_8
*current_size
;
1304 current_size
= A_SIZE_8(agp_bridge
.current_size
);
1306 pci_write_config_byte(agp_bridge
.dev
, VIA_APSIZE
,
1307 current_size
->size_value
);
1308 /* address to map too */
1309 pci_read_config_dword(agp_bridge
.dev
, VIA_APBASE
, &temp
);
1310 agp_bridge
.gart_bus_addr
= (temp
& PCI_BASE_ADDRESS_MEM_MASK
);
1312 /* GART control register */
1313 pci_write_config_dword(agp_bridge
.dev
, VIA_GARTCTRL
, 0x0000000f);
1315 /* attbase - aperture GATT base */
1316 pci_write_config_dword(agp_bridge
.dev
, VIA_ATTBASE
,
1317 (agp_bridge
.gatt_bus_addr
& 0xfffff000) | 3);
1321 static void via_cleanup(void)
1323 aper_size_info_8
*previous_size
;
1325 previous_size
= A_SIZE_8(agp_bridge
.previous_size
);
1326 pci_write_config_dword(agp_bridge
.dev
, VIA_ATTBASE
, 0);
1327 pci_write_config_byte(agp_bridge
.dev
, VIA_APSIZE
,
1328 previous_size
->size_value
);
1331 static void via_tlbflush(agp_memory
* mem
)
1333 pci_write_config_dword(agp_bridge
.dev
, VIA_GARTCTRL
, 0x0000008f);
1334 pci_write_config_dword(agp_bridge
.dev
, VIA_GARTCTRL
, 0x0000000f);
1337 static unsigned long via_mask_memory(unsigned long addr
, int type
)
1339 /* Memory type is ignored */
1341 return addr
| agp_bridge
.masks
[0].mask
;
1344 static aper_size_info_8 via_generic_sizes
[7] =
1347 {128, 32768, 5, 128},
1348 {64, 16384, 4, 192},
1355 static gatt_mask via_generic_masks
[] =
1360 static int __init
via_generic_setup (struct pci_dev
*pdev
)
1362 agp_bridge
.masks
= via_generic_masks
;
1363 agp_bridge
.num_of_masks
= 1;
1364 agp_bridge
.aperture_sizes
= (void *) via_generic_sizes
;
1365 agp_bridge
.size_type
= U8_APER_SIZE
;
1366 agp_bridge
.num_aperture_sizes
= 7;
1367 agp_bridge
.dev_private_data
= NULL
;
1368 agp_bridge
.needs_scratch_page
= FALSE
;
1369 agp_bridge
.configure
= via_configure
;
1370 agp_bridge
.fetch_size
= via_fetch_size
;
1371 agp_bridge
.cleanup
= via_cleanup
;
1372 agp_bridge
.tlb_flush
= via_tlbflush
;
1373 agp_bridge
.mask_memory
= via_mask_memory
;
1374 agp_bridge
.agp_enable
= agp_generic_agp_enable
;
1375 agp_bridge
.cache_flush
= global_cache_flush
;
1376 agp_bridge
.create_gatt_table
= agp_generic_create_gatt_table
;
1377 agp_bridge
.free_gatt_table
= agp_generic_free_gatt_table
;
1378 agp_bridge
.insert_memory
= agp_generic_insert_memory
;
1379 agp_bridge
.remove_memory
= agp_generic_remove_memory
;
1380 agp_bridge
.alloc_by_type
= agp_generic_alloc_by_type
;
1381 agp_bridge
.free_by_type
= agp_generic_free_by_type
;
1385 (void) pdev
; /* unused */
1388 #endif /* CONFIG_AGP_VIA */
1390 #ifdef CONFIG_AGP_SIS
1392 static int sis_fetch_size(void)
1396 aper_size_info_8
*values
;
1398 pci_read_config_byte(agp_bridge
.dev
, SIS_APSIZE
, &temp_size
);
1399 values
= A_SIZE_8(agp_bridge
.aperture_sizes
);
1400 for (i
= 0; i
< agp_bridge
.num_aperture_sizes
; i
++) {
1401 if ((temp_size
== values
[i
].size_value
) ||
1402 ((temp_size
& ~(0x03)) ==
1403 (values
[i
].size_value
& ~(0x03)))) {
1404 agp_bridge
.previous_size
=
1405 agp_bridge
.current_size
= (void *) (values
+ i
);
1407 agp_bridge
.aperture_size_idx
= i
;
1408 return values
[i
].size
;
1416 static void sis_tlbflush(agp_memory
* mem
)
1418 pci_write_config_byte(agp_bridge
.dev
, SIS_TLBFLUSH
, 0x02);
1421 static int sis_configure(void)
1424 aper_size_info_8
*current_size
;
1426 current_size
= A_SIZE_8(agp_bridge
.current_size
);
1427 pci_write_config_byte(agp_bridge
.dev
, SIS_TLBCNTRL
, 0x05);
1428 pci_read_config_dword(agp_bridge
.dev
, SIS_APBASE
, &temp
);
1429 agp_bridge
.gart_bus_addr
= (temp
& PCI_BASE_ADDRESS_MEM_MASK
);
1430 pci_write_config_dword(agp_bridge
.dev
, SIS_ATTBASE
,
1431 agp_bridge
.gatt_bus_addr
);
1432 pci_write_config_byte(agp_bridge
.dev
, SIS_APSIZE
,
1433 current_size
->size_value
);
1437 static void sis_cleanup(void)
1439 aper_size_info_8
*previous_size
;
1441 previous_size
= A_SIZE_8(agp_bridge
.previous_size
);
1442 pci_write_config_byte(agp_bridge
.dev
, SIS_APSIZE
,
1443 (previous_size
->size_value
& ~(0x03)));
1446 static unsigned long sis_mask_memory(unsigned long addr
, int type
)
1448 /* Memory type is ignored */
1450 return addr
| agp_bridge
.masks
[0].mask
;
1453 static aper_size_info_8 sis_generic_sizes
[7] =
1455 {256, 65536, 6, 99},
1456 {128, 32768, 5, 83},
1464 static gatt_mask sis_generic_masks
[] =
1469 static int __init
sis_generic_setup (struct pci_dev
*pdev
)
1471 agp_bridge
.masks
= sis_generic_masks
;
1472 agp_bridge
.num_of_masks
= 1;
1473 agp_bridge
.aperture_sizes
= (void *) sis_generic_sizes
;
1474 agp_bridge
.size_type
= U8_APER_SIZE
;
1475 agp_bridge
.num_aperture_sizes
= 7;
1476 agp_bridge
.dev_private_data
= NULL
;
1477 agp_bridge
.needs_scratch_page
= FALSE
;
1478 agp_bridge
.configure
= sis_configure
;
1479 agp_bridge
.fetch_size
= sis_fetch_size
;
1480 agp_bridge
.cleanup
= sis_cleanup
;
1481 agp_bridge
.tlb_flush
= sis_tlbflush
;
1482 agp_bridge
.mask_memory
= sis_mask_memory
;
1483 agp_bridge
.agp_enable
= agp_generic_agp_enable
;
1484 agp_bridge
.cache_flush
= global_cache_flush
;
1485 agp_bridge
.create_gatt_table
= agp_generic_create_gatt_table
;
1486 agp_bridge
.free_gatt_table
= agp_generic_free_gatt_table
;
1487 agp_bridge
.insert_memory
= agp_generic_insert_memory
;
1488 agp_bridge
.remove_memory
= agp_generic_remove_memory
;
1489 agp_bridge
.alloc_by_type
= agp_generic_alloc_by_type
;
1490 agp_bridge
.free_by_type
= agp_generic_free_by_type
;
1495 #endif /* CONFIG_AGP_SIS */
1497 #ifdef CONFIG_AGP_AMD
/* One page of the two-level AMD Irongate GATT: the kernel-virtual page and
 * its uncached (ioremap_nocache) alias used for device-visible writes. */
typedef struct _amd_page_map {
	unsigned long *real;
	unsigned long *remapped;
} amd_page_map;
1504 static struct _amd_irongate_private
{
1505 volatile u8
*registers
;
1506 amd_page_map
**gatt_pages
;
1508 } amd_irongate_private
;
1510 static int amd_create_page_map(amd_page_map
*page_map
)
1514 page_map
->real
= (unsigned long *) __get_free_page(GFP_KERNEL
);
1515 if (page_map
->real
== NULL
) {
1518 set_bit(PG_reserved
, &virt_to_page(page_map
->real
)->flags
);
1520 page_map
->remapped
= ioremap_nocache(virt_to_phys(page_map
->real
),
1522 if (page_map
->remapped
== NULL
) {
1523 clear_bit(PG_reserved
,
1524 &virt_to_page(page_map
->real
)->flags
);
1525 free_page((unsigned long) page_map
->real
);
1526 page_map
->real
= NULL
;
1531 for(i
= 0; i
< PAGE_SIZE
/ sizeof(unsigned long); i
++) {
1532 page_map
->remapped
[i
] = agp_bridge
.scratch_page
;
1538 static void amd_free_page_map(amd_page_map
*page_map
)
1540 iounmap(page_map
->remapped
);
1541 clear_bit(PG_reserved
,
1542 &virt_to_page(page_map
->real
)->flags
);
1543 free_page((unsigned long) page_map
->real
);
1546 static void amd_free_gatt_pages(void)
1549 amd_page_map
**tables
;
1550 amd_page_map
*entry
;
1552 tables
= amd_irongate_private
.gatt_pages
;
1553 for(i
= 0; i
< amd_irongate_private
.num_tables
; i
++) {
1555 if (entry
!= NULL
) {
1556 if (entry
->real
!= NULL
) {
1557 amd_free_page_map(entry
);
1565 static int amd_create_gatt_pages(int nr_tables
)
1567 amd_page_map
**tables
;
1568 amd_page_map
*entry
;
1572 tables
= kmalloc((nr_tables
+ 1) * sizeof(amd_page_map
*),
1574 if (tables
== NULL
) {
1577 memset(tables
, 0, sizeof(amd_page_map
*) * (nr_tables
+ 1));
1578 for (i
= 0; i
< nr_tables
; i
++) {
1579 entry
= kmalloc(sizeof(amd_page_map
), GFP_KERNEL
);
1580 if (entry
== NULL
) {
1584 memset(entry
, 0, sizeof(amd_page_map
));
1586 retval
= amd_create_page_map(entry
);
1587 if (retval
!= 0) break;
1589 amd_irongate_private
.num_tables
= nr_tables
;
1590 amd_irongate_private
.gatt_pages
= tables
;
1592 if (retval
!= 0) amd_free_gatt_pages();
/* Since we don't need contiguous memory we just try
 * to get the gatt table once.
 *
 * GET_PAGE_DIR_OFF: index into the page directory (4 MB granularity).
 * GET_PAGE_DIR_IDX: same, relative to the start of the aperture.
 * GET_GATT_OFF:     index into a second-level GATT page (4 KB granularity).
 * GET_GATT:         uncached alias of the GATT page covering addr.
 */
#define GET_PAGE_DIR_OFF(addr) (addr >> 22)
#define GET_PAGE_DIR_IDX(addr) (GET_PAGE_DIR_OFF(addr) - \
	GET_PAGE_DIR_OFF(agp_bridge.gart_bus_addr))
#define GET_GATT_OFF(addr) ((addr & 0x003ff000) >> 12)
#define GET_GATT(addr) (amd_irongate_private.gatt_pages[\
	GET_PAGE_DIR_IDX(addr)]->remapped)
1608 static int amd_create_gatt_table(void)
1610 aper_size_info_lvl2
*value
;
1611 amd_page_map page_dir
;
1617 value
= A_SIZE_LVL2(agp_bridge
.current_size
);
1618 retval
= amd_create_page_map(&page_dir
);
1623 retval
= amd_create_gatt_pages(value
->num_entries
/ 1024);
1625 amd_free_page_map(&page_dir
);
1629 agp_bridge
.gatt_table_real
= page_dir
.real
;
1630 agp_bridge
.gatt_table
= page_dir
.remapped
;
1631 agp_bridge
.gatt_bus_addr
= virt_to_bus(page_dir
.real
);
1633 /* Get the address for the gart region.
1634 * This is a bus address even on the alpha, b/c its
1635 * used to program the agp master not the cpu
1638 pci_read_config_dword(agp_bridge
.dev
, AMD_APBASE
, &temp
);
1639 addr
= (temp
& PCI_BASE_ADDRESS_MEM_MASK
);
1640 agp_bridge
.gart_bus_addr
= addr
;
1642 /* Calculate the agp offset */
1643 for(i
= 0; i
< value
->num_entries
/ 1024; i
++, addr
+= 0x00400000) {
1644 page_dir
.remapped
[GET_PAGE_DIR_OFF(addr
)] =
1645 virt_to_bus(amd_irongate_private
.gatt_pages
[i
]->real
);
1646 page_dir
.remapped
[GET_PAGE_DIR_OFF(addr
)] |= 0x00000001;
1652 static int amd_free_gatt_table(void)
1654 amd_page_map page_dir
;
1656 page_dir
.real
= agp_bridge
.gatt_table_real
;
1657 page_dir
.remapped
= agp_bridge
.gatt_table
;
1659 amd_free_gatt_pages();
1660 amd_free_page_map(&page_dir
);
1664 static int amd_irongate_fetch_size(void)
1668 aper_size_info_lvl2
*values
;
1670 pci_read_config_dword(agp_bridge
.dev
, AMD_APSIZE
, &temp
);
1671 temp
= (temp
& 0x0000000e);
1672 values
= A_SIZE_LVL2(agp_bridge
.aperture_sizes
);
1673 for (i
= 0; i
< agp_bridge
.num_aperture_sizes
; i
++) {
1674 if (temp
== values
[i
].size_value
) {
1675 agp_bridge
.previous_size
=
1676 agp_bridge
.current_size
= (void *) (values
+ i
);
1678 agp_bridge
.aperture_size_idx
= i
;
1679 return values
[i
].size
;
1686 static int amd_irongate_configure(void)
1688 aper_size_info_lvl2
*current_size
;
1692 current_size
= A_SIZE_LVL2(agp_bridge
.current_size
);
1694 /* Get the memory mapped registers */
1695 pci_read_config_dword(agp_bridge
.dev
, AMD_MMBASE
, &temp
);
1696 temp
= (temp
& PCI_BASE_ADDRESS_MEM_MASK
);
1697 amd_irongate_private
.registers
= (volatile u8
*) ioremap(temp
, 4096);
1699 /* Write out the address of the gatt table */
1700 OUTREG32(amd_irongate_private
.registers
, AMD_ATTBASE
,
1701 agp_bridge
.gatt_bus_addr
);
1703 /* Write the Sync register */
1704 pci_write_config_byte(agp_bridge
.dev
, AMD_MODECNTL
, 0x80);
1706 /* Set indexing mode */
1707 pci_write_config_byte(agp_bridge
.dev
, AMD_MODECNTL2
, 0x00);
1709 /* Write the enable register */
1710 enable_reg
= INREG16(amd_irongate_private
.registers
, AMD_GARTENABLE
);
1711 enable_reg
= (enable_reg
| 0x0004);
1712 OUTREG16(amd_irongate_private
.registers
, AMD_GARTENABLE
, enable_reg
);
1714 /* Write out the size register */
1715 pci_read_config_dword(agp_bridge
.dev
, AMD_APSIZE
, &temp
);
1716 temp
= (((temp
& ~(0x0000000e)) | current_size
->size_value
)
1718 pci_write_config_dword(agp_bridge
.dev
, AMD_APSIZE
, temp
);
1721 OUTREG32(amd_irongate_private
.registers
, AMD_TLBFLUSH
, 0x00000001);
1726 static void amd_irongate_cleanup(void)
1728 aper_size_info_lvl2
*previous_size
;
1732 previous_size
= A_SIZE_LVL2(agp_bridge
.previous_size
);
1734 enable_reg
= INREG16(amd_irongate_private
.registers
, AMD_GARTENABLE
);
1735 enable_reg
= (enable_reg
& ~(0x0004));
1736 OUTREG16(amd_irongate_private
.registers
, AMD_GARTENABLE
, enable_reg
);
1738 /* Write back the previous size and disable gart translation */
1739 pci_read_config_dword(agp_bridge
.dev
, AMD_APSIZE
, &temp
);
1740 temp
= ((temp
& ~(0x0000000f)) | previous_size
->size_value
);
1741 pci_write_config_dword(agp_bridge
.dev
, AMD_APSIZE
, temp
);
1742 iounmap((void *) amd_irongate_private
.registers
);
1746 * This routine could be implemented by taking the addresses
1747 * written to the GATT, and flushing them individually. However
1748 * currently it just flushes the whole table. Which is probably
1749 * more efficent, since agp_memory blocks can be a large number of
1753 static void amd_irongate_tlbflush(agp_memory
* temp
)
1755 OUTREG32(amd_irongate_private
.registers
, AMD_TLBFLUSH
, 0x00000001);
1758 static unsigned long amd_irongate_mask_memory(unsigned long addr
, int type
)
1760 /* Only type 0 is supported by the irongate */
1762 return addr
| agp_bridge
.masks
[0].mask
;
1765 static int amd_insert_memory(agp_memory
* mem
,
1766 off_t pg_start
, int type
)
1768 int i
, j
, num_entries
;
1769 unsigned long *cur_gatt
;
1772 num_entries
= A_SIZE_LVL2(agp_bridge
.current_size
)->num_entries
;
1774 if (type
!= 0 || mem
->type
!= 0) {
1777 if ((pg_start
+ mem
->page_count
) > num_entries
) {
1782 while (j
< (pg_start
+ mem
->page_count
)) {
1783 addr
= (j
* PAGE_SIZE
) + agp_bridge
.gart_bus_addr
;
1784 cur_gatt
= GET_GATT(addr
);
1785 if (!PGE_EMPTY(cur_gatt
[GET_GATT_OFF(addr
)])) {
1791 if (mem
->is_flushed
== FALSE
) {
1793 mem
->is_flushed
= TRUE
;
1796 for (i
= 0, j
= pg_start
; i
< mem
->page_count
; i
++, j
++) {
1797 addr
= (j
* PAGE_SIZE
) + agp_bridge
.gart_bus_addr
;
1798 cur_gatt
= GET_GATT(addr
);
1799 cur_gatt
[GET_GATT_OFF(addr
)] = mem
->memory
[i
];
1801 agp_bridge
.tlb_flush(mem
);
1805 static int amd_remove_memory(agp_memory
* mem
, off_t pg_start
,
1809 unsigned long *cur_gatt
;
1812 if (type
!= 0 || mem
->type
!= 0) {
1815 for (i
= pg_start
; i
< (mem
->page_count
+ pg_start
); i
++) {
1816 addr
= (i
* PAGE_SIZE
) + agp_bridge
.gart_bus_addr
;
1817 cur_gatt
= GET_GATT(addr
);
1818 cur_gatt
[GET_GATT_OFF(addr
)] =
1819 (unsigned long) agp_bridge
.scratch_page
;
1822 agp_bridge
.tlb_flush(mem
);
1826 static aper_size_info_lvl2 amd_irongate_sizes
[7] =
1828 {2048, 524288, 0x0000000c},
1829 {1024, 262144, 0x0000000a},
1830 {512, 131072, 0x00000008},
1831 {256, 65536, 0x00000006},
1832 {128, 32768, 0x00000004},
1833 {64, 16384, 0x00000002},
1834 {32, 8192, 0x00000000}
1837 static gatt_mask amd_irongate_masks
[] =
1842 static int __init
amd_irongate_setup (struct pci_dev
*pdev
)
1844 agp_bridge
.masks
= amd_irongate_masks
;
1845 agp_bridge
.num_of_masks
= 1;
1846 agp_bridge
.aperture_sizes
= (void *) amd_irongate_sizes
;
1847 agp_bridge
.size_type
= LVL2_APER_SIZE
;
1848 agp_bridge
.num_aperture_sizes
= 7;
1849 agp_bridge
.dev_private_data
= (void *) &amd_irongate_private
;
1850 agp_bridge
.needs_scratch_page
= FALSE
;
1851 agp_bridge
.configure
= amd_irongate_configure
;
1852 agp_bridge
.fetch_size
= amd_irongate_fetch_size
;
1853 agp_bridge
.cleanup
= amd_irongate_cleanup
;
1854 agp_bridge
.tlb_flush
= amd_irongate_tlbflush
;
1855 agp_bridge
.mask_memory
= amd_irongate_mask_memory
;
1856 agp_bridge
.agp_enable
= agp_generic_agp_enable
;
1857 agp_bridge
.cache_flush
= global_cache_flush
;
1858 agp_bridge
.create_gatt_table
= amd_create_gatt_table
;
1859 agp_bridge
.free_gatt_table
= amd_free_gatt_table
;
1860 agp_bridge
.insert_memory
= amd_insert_memory
;
1861 agp_bridge
.remove_memory
= amd_remove_memory
;
1862 agp_bridge
.alloc_by_type
= agp_generic_alloc_by_type
;
1863 agp_bridge
.free_by_type
= agp_generic_free_by_type
;
1867 (void) pdev
; /* unused */
1870 #endif /* CONFIG_AGP_AMD */
1872 #ifdef CONFIG_AGP_ALI
1874 static int ali_fetch_size(void)
1878 aper_size_info_32
*values
;
1880 pci_read_config_dword(agp_bridge
.dev
, ALI_ATTBASE
, &temp
);
1881 temp
&= ~(0xfffffff0);
1882 values
= A_SIZE_32(agp_bridge
.aperture_sizes
);
1884 for (i
= 0; i
< agp_bridge
.num_aperture_sizes
; i
++) {
1885 if (temp
== values
[i
].size_value
) {
1886 agp_bridge
.previous_size
=
1887 agp_bridge
.current_size
= (void *) (values
+ i
);
1888 agp_bridge
.aperture_size_idx
= i
;
1889 return values
[i
].size
;
1896 static void ali_tlbflush(agp_memory
* mem
)
1900 pci_read_config_dword(agp_bridge
.dev
, ALI_TLBCTRL
, &temp
);
1901 pci_write_config_dword(agp_bridge
.dev
, ALI_TLBCTRL
,
1902 ((temp
& 0xffffff00) | 0x00000090));
1903 pci_write_config_dword(agp_bridge
.dev
, ALI_TLBCTRL
,
1904 ((temp
& 0xffffff00) | 0x00000010));
1907 static void ali_cleanup(void)
1909 aper_size_info_32
*previous_size
;
1912 previous_size
= A_SIZE_32(agp_bridge
.previous_size
);
1914 pci_read_config_dword(agp_bridge
.dev
, ALI_TLBCTRL
, &temp
);
1915 pci_write_config_dword(agp_bridge
.dev
, ALI_TLBCTRL
,
1916 ((temp
& 0xffffff00) | 0x00000090));
1917 pci_write_config_dword(agp_bridge
.dev
, ALI_ATTBASE
,
1918 previous_size
->size_value
);
1921 static int ali_configure(void)
1924 aper_size_info_32
*current_size
;
1926 current_size
= A_SIZE_32(agp_bridge
.current_size
);
1928 /* aperture size and gatt addr */
1929 pci_write_config_dword(agp_bridge
.dev
, ALI_ATTBASE
,
1930 agp_bridge
.gatt_bus_addr
| current_size
->size_value
);
1933 pci_read_config_dword(agp_bridge
.dev
, ALI_TLBCTRL
, &temp
);
1934 pci_write_config_dword(agp_bridge
.dev
, ALI_TLBCTRL
,
1935 ((temp
& 0xffffff00) | 0x00000010));
1937 /* address to map to */
1938 pci_read_config_dword(agp_bridge
.dev
, ALI_APBASE
, &temp
);
1939 agp_bridge
.gart_bus_addr
= (temp
& PCI_BASE_ADDRESS_MEM_MASK
);
1943 static unsigned long ali_mask_memory(unsigned long addr
, int type
)
1945 /* Memory type is ignored */
1947 return addr
| agp_bridge
.masks
[0].mask
;
1951 /* Setup function */
1952 static gatt_mask ali_generic_masks
[] =
1957 static aper_size_info_32 ali_generic_sizes
[7] =
1959 {256, 65536, 6, 10},
1968 static int __init
ali_generic_setup (struct pci_dev
*pdev
)
1970 agp_bridge
.masks
= ali_generic_masks
;
1971 agp_bridge
.num_of_masks
= 1;
1972 agp_bridge
.aperture_sizes
= (void *) ali_generic_sizes
;
1973 agp_bridge
.size_type
= U32_APER_SIZE
;
1974 agp_bridge
.num_aperture_sizes
= 7;
1975 agp_bridge
.dev_private_data
= NULL
;
1976 agp_bridge
.needs_scratch_page
= FALSE
;
1977 agp_bridge
.configure
= ali_configure
;
1978 agp_bridge
.fetch_size
= ali_fetch_size
;
1979 agp_bridge
.cleanup
= ali_cleanup
;
1980 agp_bridge
.tlb_flush
= ali_tlbflush
;
1981 agp_bridge
.mask_memory
= ali_mask_memory
;
1982 agp_bridge
.agp_enable
= agp_generic_agp_enable
;
1983 agp_bridge
.cache_flush
= global_cache_flush
;
1984 agp_bridge
.create_gatt_table
= agp_generic_create_gatt_table
;
1985 agp_bridge
.free_gatt_table
= agp_generic_free_gatt_table
;
1986 agp_bridge
.insert_memory
= agp_generic_insert_memory
;
1987 agp_bridge
.remove_memory
= agp_generic_remove_memory
;
1988 agp_bridge
.alloc_by_type
= agp_generic_alloc_by_type
;
1989 agp_bridge
.free_by_type
= agp_generic_free_by_type
;
1993 (void) pdev
; /* unused */
1996 #endif /* CONFIG_AGP_ALI */
1999 /* per-chipset initialization data.
2000 * note -- all chipsets for a single vendor MUST be grouped together
2003 unsigned short device_id
; /* first, to make table easier to read */
2004 unsigned short vendor_id
;
2005 enum chipset_type chipset
;
2006 const char *vendor_name
;
2007 const char *chipset_name
;
2008 int (*chipset_setup
) (struct pci_dev
*pdev
);
2009 } agp_bridge_info
[] __initdata
= {
2011 #ifdef CONFIG_AGP_ALI
2012 { PCI_DEVICE_ID_AL_M1541_0
,
2017 ali_generic_setup
},
2023 ali_generic_setup
},
2024 #endif /* CONFIG_AGP_ALI */
2026 #ifdef CONFIG_AGP_AMD
2027 { PCI_DEVICE_ID_AMD_IRONGATE_0
,
2032 amd_irongate_setup
},
2038 amd_irongate_setup
},
2039 #endif /* CONFIG_AGP_AMD */
2041 #ifdef CONFIG_AGP_INTEL
2042 { PCI_DEVICE_ID_INTEL_82443LX_0
,
2043 PCI_VENDOR_ID_INTEL
,
2047 intel_generic_setup
},
2048 { PCI_DEVICE_ID_INTEL_82443BX_0
,
2049 PCI_VENDOR_ID_INTEL
,
2053 intel_generic_setup
},
2054 { PCI_DEVICE_ID_INTEL_82443GX_0
,
2055 PCI_VENDOR_ID_INTEL
,
2059 intel_generic_setup
},
2060 { PCI_DEVICE_ID_INTEL_840_0
,
2061 PCI_VENDOR_ID_INTEL
,
2067 PCI_VENDOR_ID_INTEL
,
2071 intel_generic_setup
},
2072 #endif /* CONFIG_AGP_INTEL */
2074 #ifdef CONFIG_AGP_SIS
2075 { PCI_DEVICE_ID_SI_630
,
2080 sis_generic_setup
},
2081 { PCI_DEVICE_ID_SI_540
,
2086 sis_generic_setup
},
2087 { PCI_DEVICE_ID_SI_620
,
2092 sis_generic_setup
},
2093 { PCI_DEVICE_ID_SI_530
,
2098 sis_generic_setup
},
2099 { PCI_DEVICE_ID_SI_630
,
2104 sis_generic_setup
},
2105 { PCI_DEVICE_ID_SI_540
,
2110 sis_generic_setup
},
2111 { PCI_DEVICE_ID_SI_620
,
2116 sis_generic_setup
},
2117 { PCI_DEVICE_ID_SI_530
,
2122 sis_generic_setup
},
2128 sis_generic_setup
},
2129 #endif /* CONFIG_AGP_SIS */
2131 #ifdef CONFIG_AGP_VIA
2132 { PCI_DEVICE_ID_VIA_8501_0
,
2137 via_generic_setup
},
2138 { PCI_DEVICE_ID_VIA_82C597_0
,
2143 via_generic_setup
},
2144 { PCI_DEVICE_ID_VIA_82C598_0
,
2149 via_generic_setup
},
2150 { PCI_DEVICE_ID_VIA_82C691_0
,
2155 via_generic_setup
},
2156 { PCI_DEVICE_ID_VIA_8371_0
,
2161 via_generic_setup
},
2162 { PCI_DEVICE_ID_VIA_8363_0
,
2167 via_generic_setup
},
2173 via_generic_setup
},
2174 #endif /* CONFIG_AGP_VIA */
2176 { 0, }, /* dummy final entry, always present */
2180 /* scan table above for supported devices */
2181 static int __init
agp_lookup_host_bridge (struct pci_dev
*pdev
)
2185 for (i
= 0; i
< arraysize (agp_bridge_info
); i
++)
2186 if (pdev
->vendor
== agp_bridge_info
[i
].vendor_id
)
2189 if (i
>= arraysize (agp_bridge_info
)) {
2190 printk (KERN_DEBUG PFX
"unsupported bridge\n");
2194 while ((i
< arraysize (agp_bridge_info
)) &&
2195 (agp_bridge_info
[i
].vendor_id
== pdev
->vendor
)) {
2196 if (pdev
->device
== agp_bridge_info
[i
].device_id
) {
2197 printk (KERN_INFO PFX
"Detected %s %s chipset\n",
2198 agp_bridge_info
[i
].vendor_name
,
2199 agp_bridge_info
[i
].chipset_name
);
2200 agp_bridge
.type
= agp_bridge_info
[i
].chipset
;
2201 return agp_bridge_info
[i
].chipset_setup (pdev
);
2207 i
--; /* point to vendor generic entry (device_id == 0) */
2209 /* try init anyway, if user requests it AND
2210 * there is a 'generic' bridge entry for this vendor */
2211 if (agp_try_unsupported
&& agp_bridge_info
[i
].device_id
== 0) {
2212 printk(KERN_WARNING PFX
"Trying generic %s routines"
2213 " for device id: %04x\n",
2214 agp_bridge_info
[i
].vendor_name
, pdev
->device
);
2215 agp_bridge
.type
= agp_bridge_info
[i
].chipset
;
2216 return agp_bridge_info
[i
].chipset_setup (pdev
);
2219 printk(KERN_ERR PFX
"Unsupported %s chipset (device id: %04x),"
2220 " you might want to try agp_try_unsupported=1.\n",
2221 agp_bridge_info
[i
].vendor_name
, pdev
->device
);
2226 /* Supported Device Scanning routine */
2228 static int __init
agp_find_supported_device(void)
2230 struct pci_dev
*dev
= NULL
;
2232 u32 cap_id
, scratch
;
2234 if ((dev
= pci_find_class(PCI_CLASS_BRIDGE_HOST
<< 8, NULL
)) == NULL
)
2237 agp_bridge
.dev
= dev
;
2239 /* Need to test for I810 here */
2240 #ifdef CONFIG_AGP_I810
2241 if (dev
->vendor
== PCI_VENDOR_ID_INTEL
) {
2242 struct pci_dev
*i810_dev
;
2244 switch (dev
->device
) {
2245 case PCI_DEVICE_ID_INTEL_810_0
:
2246 i810_dev
= pci_find_device(PCI_VENDOR_ID_INTEL
,
2247 PCI_DEVICE_ID_INTEL_810_1
,
2249 if (i810_dev
== NULL
) {
2250 printk(KERN_ERR PFX
"Detected an Intel i810,"
2251 " but could not find the secondary"
2255 printk(KERN_INFO PFX
"Detected an Intel "
2257 agp_bridge
.type
= INTEL_I810
;
2258 return intel_i810_setup (i810_dev
);
2260 case PCI_DEVICE_ID_INTEL_810_DC100_0
:
2261 i810_dev
= pci_find_device(PCI_VENDOR_ID_INTEL
,
2262 PCI_DEVICE_ID_INTEL_810_DC100_1
,
2264 if (i810_dev
== NULL
) {
2265 printk(KERN_ERR PFX
"Detected an Intel i810 "
2266 "DC100, but could not find the "
2267 "secondary device.\n");
2270 printk(KERN_INFO PFX
"Detected an Intel i810 "
2271 "DC100 Chipset.\n");
2272 agp_bridge
.type
= INTEL_I810
;
2273 return intel_i810_setup(i810_dev
);
2275 case PCI_DEVICE_ID_INTEL_810_E_0
:
2276 i810_dev
= pci_find_device(PCI_VENDOR_ID_INTEL
,
2277 PCI_DEVICE_ID_INTEL_810_E_1
,
2279 if (i810_dev
== NULL
) {
2280 printk(KERN_ERR PFX
"Detected an Intel i810 E"
2281 ", but could not find the secondary "
2285 printk(KERN_INFO PFX
"Detected an Intel i810 E "
2287 agp_bridge
.type
= INTEL_I810
;
2288 return intel_i810_setup(i810_dev
);
2290 case PCI_DEVICE_ID_INTEL_815_0
:
2291 /* The i815 can operate either as an i810 style
2292 * integrated device, or as an AGP4X motherboard.
2294 * This only addresses the first mode:
2296 i810_dev
= pci_find_device(PCI_VENDOR_ID_INTEL
,
2297 PCI_DEVICE_ID_INTEL_815_1
,
2299 if (i810_dev
== NULL
) {
2300 printk(KERN_ERR PFX
"agpgart: Detected an "
2301 "Intel i815, but could not find the"
2302 " secondary device.\n");
2303 agp_bridge
.type
= NOT_SUPPORTED
;
2306 printk(KERN_INFO PFX
"agpgart: Detected an Intel i815 "
2308 agp_bridge
.type
= INTEL_I810
;
2309 return intel_i810_setup(i810_dev
);
2315 #endif /* CONFIG_AGP_I810 */
2318 pci_read_config_dword(dev
, 0x04, &scratch
);
2319 if (!(scratch
& 0x00100000))
2322 pci_read_config_byte(dev
, 0x34, &cap_ptr
);
2323 if (cap_ptr
!= 0x00) {
2325 pci_read_config_dword(dev
, cap_ptr
, &cap_id
);
2327 if ((cap_id
& 0xff) != 0x02)
2328 cap_ptr
= (cap_id
>> 8) & 0xff;
2330 while (((cap_id
& 0xff) != 0x02) && (cap_ptr
!= 0x00));
2332 if (cap_ptr
== 0x00)
2334 agp_bridge
.capndx
= cap_ptr
;
2336 /* Fill in the mode register */
2337 pci_read_config_dword(agp_bridge
.dev
,
2338 agp_bridge
.capndx
+ 4,
2341 /* probe for known chipsets */
2342 return agp_lookup_host_bridge (dev
);
2345 struct agp_max_table
{
2350 static struct agp_max_table maxes_table
[9] __initdata
=
2363 static int __init
agp_find_max (void)
2365 long memory
, index
, result
;
2367 memory
= virt_to_phys(high_memory
) >> 20;
2370 while ((memory
> maxes_table
[index
].mem
) &&
2375 result
= maxes_table
[index
- 1].agp
+
2376 ( (memory
- maxes_table
[index
- 1].mem
) *
2377 (maxes_table
[index
].agp
- maxes_table
[index
- 1].agp
)) /
2378 (maxes_table
[index
].mem
- maxes_table
[index
- 1].mem
);
2380 printk(KERN_INFO PFX
"Maximum main memory to use "
2381 "for agp memory: %ldM\n", result
);
2382 result
= result
<< (20 - PAGE_SHIFT
);
2386 #define AGPGART_VERSION_MAJOR 0
2387 #define AGPGART_VERSION_MINOR 99
2389 static agp_version agp_current_version
=
2391 AGPGART_VERSION_MAJOR
,
2392 AGPGART_VERSION_MINOR
2395 static int __init
agp_backend_initialize(void)
2397 int size_value
, rc
, got_gatt
=0, got_keylist
=0;
2399 memset(&agp_bridge
, 0, sizeof(struct agp_bridge_data
));
2400 agp_bridge
.type
= NOT_SUPPORTED
;
2401 agp_bridge
.max_memory_agp
= agp_find_max();
2402 agp_bridge
.version
= &agp_current_version
;
2404 rc
= agp_find_supported_device();
2406 /* not KERN_ERR because error msg should have already printed */
2407 printk(KERN_DEBUG PFX
"no supported devices found.\n");
2411 if (agp_bridge
.needs_scratch_page
== TRUE
) {
2412 agp_bridge
.scratch_page
= agp_alloc_page();
2414 if (agp_bridge
.scratch_page
== 0) {
2415 printk(KERN_ERR PFX
"unable to get memory for "
2419 agp_bridge
.scratch_page
=
2420 virt_to_phys((void *) agp_bridge
.scratch_page
);
2421 agp_bridge
.scratch_page
=
2422 agp_bridge
.mask_memory(agp_bridge
.scratch_page
, 0);
2425 size_value
= agp_bridge
.fetch_size();
2427 if (size_value
== 0) {
2428 printk(KERN_ERR PFX
"unable to detrimine aperture size.\n");
2432 if (agp_bridge
.create_gatt_table()) {
2433 printk(KERN_ERR PFX
"unable to get memory for graphics "
2434 "translation table.\n");
2440 agp_bridge
.key_list
= vmalloc(PAGE_SIZE
* 4);
2441 if (agp_bridge
.key_list
== NULL
) {
2442 printk(KERN_ERR PFX
"error allocating memory for key lists.\n");
2448 /* FIXME vmalloc'd memory not guaranteed contiguous */
2449 memset(agp_bridge
.key_list
, 0, PAGE_SIZE
* 4);
2451 if (agp_bridge
.configure()) {
2452 printk(KERN_ERR PFX
"error configuring host chipset.\n");
2457 printk(KERN_INFO PFX
"AGP aperture is %dM @ 0x%lx\n",
2458 size_value
, agp_bridge
.gart_bus_addr
);
2463 if (agp_bridge
.needs_scratch_page
== TRUE
) {
2464 agp_bridge
.scratch_page
&= ~(0x00000fff);
2465 agp_destroy_page((unsigned long)
2466 phys_to_virt(agp_bridge
.scratch_page
));
2469 agp_bridge
.free_gatt_table();
2471 vfree(agp_bridge
.key_list
);
2476 /* cannot be __exit b/c as it could be called from __init code */
2477 static void agp_backend_cleanup(void)
2479 agp_bridge
.cleanup();
2480 agp_bridge
.free_gatt_table();
2481 vfree(agp_bridge
.key_list
);
2483 if (agp_bridge
.needs_scratch_page
== TRUE
) {
2484 agp_bridge
.scratch_page
&= ~(0x00000fff);
2485 agp_destroy_page((unsigned long)
2486 phys_to_virt(agp_bridge
.scratch_page
));
2490 extern int agp_frontend_initialize(void);
2491 extern void agp_frontend_cleanup(void);
2493 static int __init
agp_init(void)
2497 printk(KERN_INFO
"Linux agpgart interface v%d.%d (c) Jeff Hartmann\n",
2498 AGPGART_VERSION_MAJOR
, AGPGART_VERSION_MINOR
);
2500 ret_val
= agp_backend_initialize();
2502 agp_bridge
.type
= NOT_SUPPORTED
;
2505 ret_val
= agp_frontend_initialize();
2507 agp_bridge
.type
= NOT_SUPPORTED
;
2508 agp_backend_cleanup();
2515 static void __exit
agp_cleanup(void)
2517 agp_frontend_cleanup();
2518 agp_backend_cleanup();
2521 module_init(agp_init
);
2522 module_exit(agp_cleanup
);