2 * AGPGART module version 0.99
3 * Copyright (C) 1999 Jeff Hartmann
4 * Copyright (C) 1999 Precision Insight, Inc.
5 * Copyright (C) 1999 Xi Graphics, Inc.
7 * Permission is hereby granted, free of charge, to any person obtaining a
8 * copy of this software and associated documentation files (the "Software"),
9 * to deal in the Software without restriction, including without limitation
10 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
11 * and/or sell copies of the Software, and to permit persons to whom the
12 * Software is furnished to do so, subject to the following conditions:
14 * The above copyright notice and this permission notice shall be included
15 * in all copies or substantial portions of the Software.
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
18 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * JEFF HARTMANN, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM,
21 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
22 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
23 * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26 #include <linux/config.h>
27 #include <linux/version.h>
28 #include <linux/module.h>
29 #include <linux/types.h>
30 #include <linux/kernel.h>
31 #include <linux/sched.h>
33 #include <linux/string.h>
34 #include <linux/errno.h>
35 #include <linux/malloc.h>
36 #include <linux/vmalloc.h>
37 #include <linux/pci.h>
38 #include <linux/init.h>
39 #include <linux/pagemap.h>
40 #include <linux/miscdevice.h>
41 #include <asm/system.h>
42 #include <asm/uaccess.h>
46 #include <linux/agp_backend.h>
49 MODULE_AUTHOR("Jeff Hartmann <jhartmann@precisioninsight.com>");
50 MODULE_PARM(agp_try_unsupported
, "1i");
51 EXPORT_SYMBOL(agp_free_memory
);
52 EXPORT_SYMBOL(agp_allocate_memory
);
53 EXPORT_SYMBOL(agp_copy_info
);
54 EXPORT_SYMBOL(agp_bind_memory
);
55 EXPORT_SYMBOL(agp_unbind_memory
);
56 EXPORT_SYMBOL(agp_enable
);
57 EXPORT_SYMBOL(agp_backend_acquire
);
58 EXPORT_SYMBOL(agp_backend_release
);
60 static void flush_cache(void);
62 static struct agp_bridge_data agp_bridge
;
63 static int agp_try_unsupported __initdata
= 0;
66 static inline void flush_cache(void)
69 asm volatile ("wbinvd":::"memory");
70 #elif defined(__alpha__)
71 /* ??? I wonder if we'll really need to flush caches, or if the
72 core logic can manage to keep the system coherent. The ARM
73 speaks only of using `cflush' to get things in memory in
74 preparation for power failure.
76 If we do need to call `cflush', we'll need a target page,
77 as we can only flush one page at a time. */
80 #error "Please define flush_cache."
85 static atomic_t cpus_waiting
;
87 static void ipi_handler(void *null
)
90 atomic_dec(&cpus_waiting
);
91 while (atomic_read(&cpus_waiting
) > 0)
95 static void smp_flush_cache(void)
97 atomic_set(&cpus_waiting
, smp_num_cpus
- 1);
98 if (smp_call_function(ipi_handler
, NULL
, 1, 0) != 0)
99 panic(PFX
"timed out waiting for the other CPUs!\n");
101 while (atomic_read(&cpus_waiting
) > 0)
104 #define global_cache_flush smp_flush_cache
105 #else /* CONFIG_SMP */
106 #define global_cache_flush flush_cache
107 #endif /* CONFIG_SMP */
/*
 * agp_backend_acquire - claim exclusive use of the AGP backend.
 * Increments the bridge's in-use counter; if the count is not exactly 1
 * afterwards, someone else already holds the backend and the increment
 * is rolled back.
 * NOTE(review): fragment is missing interior lines (embedded original
 * numbering jumps 109->111->113->114); the return statements are not
 * visible here.
 */
109 int agp_backend_acquire(void)
111 atomic_inc(&agp_bridge
.agp_in_use
);
/* Exactly one holder allowed: back out if we were not the first. */
113 if (atomic_read(&agp_bridge
.agp_in_use
) != 1) {
114 atomic_dec(&agp_bridge
.agp_in_use
);
/*
 * agp_backend_release - release the backend claimed by
 * agp_backend_acquire() by decrementing the in-use counter.
 */
121 void agp_backend_release(void)
123 atomic_dec(&agp_bridge
.agp_in_use
);
128 * Basic Page Allocation Routines -
129 * These routines handle page allocation
130 * and by default they reserve the allocated
131 * memory. They also handle incrementing the
132 * current_memory_agp value, Which is checked
133 * against a maximum value.
/*
 * agp_alloc_page - allocate one kernel page for AGP use.
 * Grabs a page with __get_free_page(GFP_KERNEL), pins it by bumping its
 * mem_map refcount and setting PG_locked, and accounts it against the
 * bridge's current_memory_agp counter.  Returns the page's kernel
 * virtual address as an unsigned long.
 * NOTE(review): the NULL-check on pt (original lines 141-143) is
 * missing from this fragment.
 */
136 static unsigned long agp_alloc_page(void)
140 pt
= (void *) __get_free_page(GFP_KERNEL
);
/* Pin the page: extra reference plus PG_locked in the page flags. */
144 atomic_inc(&mem_map
[MAP_NR(pt
)].count
);
145 set_bit(PG_locked
, &mem_map
[MAP_NR(pt
)].flags
);
/* Account this page against the global AGP memory budget. */
146 atomic_inc(&agp_bridge
.current_memory_agp
);
147 return (unsigned long) pt
;
/*
 * agp_destroy_page - undo agp_alloc_page() for one page.
 * @page: kernel virtual address previously returned by agp_alloc_page().
 * Drops the extra mem_map reference, clears PG_locked, wakes any waiter
 * on the page, frees it, and decrements current_memory_agp.
 */
150 static void agp_destroy_page(unsigned long page
)
152 void *pt
= (void *) page
;
/* Reverse the pinning done at allocation time. */
157 atomic_dec(&mem_map
[MAP_NR(pt
)].count
);
158 clear_bit(PG_locked
, &mem_map
[MAP_NR(pt
)].flags
);
/* Wake anyone sleeping on the (formerly locked) page. */
159 wake_up(&mem_map
[MAP_NR(pt
)].wait
);
160 free_page((unsigned long) pt
);
161 atomic_dec(&agp_bridge
.current_memory_agp
);
164 /* End Basic Page Allocation Routines */
167 * Generic routines for handling agp_memory structures -
168 * They use the basic page allocation routines to do the
/*
 * agp_free_key - return a key to the bridge's key bitmap.
 * Clears the key's bit in agp_bridge.key_list so agp_get_key() can
 * hand it out again.  (A range check on @key, if any, is not visible
 * in this fragment.)
 */
173 static void agp_free_key(int key
)
180 clear_bit(key
, agp_bridge
.key_list
);
/*
 * agp_get_key - allocate the lowest free key from the bridge bitmap.
 * Finds the first zero bit below MAXKEY in agp_bridge.key_list and
 * sets it.  NOTE(review): the return path and the MAXKEY-exhausted
 * branch are missing from this fragment; presumably returns the bit
 * index on success.
 */
184 static int agp_get_key(void)
188 bit
= find_first_zero_bit(agp_bridge
.key_list
, MAXKEY
);
190 set_bit(bit
, agp_bridge
.key_list
);
/*
 * agp_create_memory - allocate and initialise an agp_memory descriptor.
 * @scratch_pages: number of PAGE_SIZE pages to vmalloc for the entry
 *                 array (new->memory).
 * kmallocs the struct, zeroes it, assigns it a key via agp_get_key(),
 * and vmallocs the scratch array.  On vmalloc failure the key is
 * released again.
 * NOTE(review): the kmalloc NULL-check and all return statements fall
 * in lines missing from this fragment.
 */
196 static agp_memory
*agp_create_memory(int scratch_pages
)
200 new = kmalloc(sizeof(agp_memory
), GFP_KERNEL
);
205 memset(new, 0, sizeof(agp_memory
));
206 new->key
= agp_get_key();
/* Entry table sized in whole pages, one PAGE_SIZE per scratch page. */
212 new->memory
= vmalloc(PAGE_SIZE
* scratch_pages
);
214 if (new->memory
== NULL
) {
/* vmalloc failed: give the key back before bailing out. */
215 agp_free_key(new->key
);
219 new->num_scratch_pages
= scratch_pages
;
/*
 * agp_free_memory - tear down an agp_memory block.
 * @curr: descriptor to free (NULL-check, if any, not visible here).
 * Unbinds it from the GATT if still bound, delegates non-zero typed
 * memory to the chipset's free_by_type hook, otherwise strips the low
 * 12 mask bits from each entry and destroys the underlying page, then
 * releases the key.  The trailing vfree/kfree lines are missing from
 * this fragment.
 */
223 void agp_free_memory(agp_memory
* curr
)
230 if (curr
->is_bound
== TRUE
) {
/* Still mapped into the aperture: remove it first. */
231 agp_unbind_memory(curr
);
233 if (curr
->type
!= 0) {
/* Chipset-specific memory types are freed by the driver hook. */
234 agp_bridge
.free_by_type(curr
);
237 if (curr
->page_count
!= 0) {
238 for (i
= 0; i
< curr
->page_count
; i
++) {
/* Mask off the per-entry flag bits to recover the physical address. */
239 curr
->memory
[i
] &= ~(0x00000fff);
240 agp_destroy_page((unsigned long)
241 phys_to_virt(curr
->memory
[i
]));
244 agp_free_key(curr
->key
);
250 #define ENTRIES_PER_PAGE (PAGE_SIZE / sizeof(unsigned long))
/*
 * agp_allocate_memory - allocate page_count pages of AGP memory.
 * @page_count: number of pages requested.
 * @type: 0 for generic memory; anything else is routed through the
 *        chipset's alloc_by_type hook.
 * Enforces the max_memory_agp budget, then builds a descriptor sized
 * to hold page_count entries (ENTRIES_PER_PAGE per scratch page) and
 * fills it one agp_alloc_page() at a time, freeing everything on a
 * partial failure.  Each entry is run through the chipset mask_memory
 * hook.  NOTE(review): heavily truncated fragment — returns, the
 * MOD_INC_USE_COUNT line, and the mask_memory argument tail are in
 * missing lines.
 */
252 agp_memory
*agp_allocate_memory(size_t page_count
, u32 type
)
/* Refuse requests that would exceed the global AGP memory budget. */
258 if ((atomic_read(&agp_bridge
.current_memory_agp
) + page_count
) >
259 agp_bridge
.max_memory_agp
) {
/* Non-generic types are the chipset driver's responsibility. */
264 new = agp_bridge
.alloc_by_type(page_count
, type
);
267 /* We always increase the module count, since free auto-decrements
/* Round up: one vmalloc'd page holds ENTRIES_PER_PAGE table entries. */
273 scratch_pages
= (page_count
+ ENTRIES_PER_PAGE
- 1) / ENTRIES_PER_PAGE
;
275 new = agp_create_memory(scratch_pages
);
281 for (i
= 0; i
< page_count
; i
++) {
282 new->memory
[i
] = agp_alloc_page();
284 if (new->memory
[i
] == 0) {
285 /* Free this structure */
286 agp_free_memory(new);
/* Apply chipset-specific PTE mask bits to the physical address. */
290 agp_bridge
.mask_memory(
291 virt_to_phys((void *) new->memory
[i
]),
299 /* End - Generic routines for handling agp_memory structures */
/*
 * agp_return_size - report the current aperture size in MB.
 * Dispatches on agp_bridge.size_type to interpret current_size through
 * the matching aper_size_info_* cast (8/16/32-bit, two-level, fixed).
 * NOTE(review): the case labels for the first four branches and the
 * final return are in lines missing from this fragment.
 */
301 static int agp_return_size(void)
306 temp
= agp_bridge
.current_size
;
308 switch (agp_bridge
.size_type
) {
310 current_size
= A_SIZE_8(temp
)->size
;
313 current_size
= A_SIZE_16(temp
)->size
;
316 current_size
= A_SIZE_32(temp
)->size
;
319 current_size
= A_SIZE_LVL2(temp
)->size
;
321 case FIXED_APER_SIZE
:
322 current_size
= A_SIZE_FIX(temp
)->size
;
332 /* Routine to copy over information structure */
/*
 * agp_copy_info - fill a caller-supplied agp_kern_info snapshot.
 * @info: destination; zeroed first, then populated field by field from
 *        the global agp_bridge state (backend version, PCI device,
 *        chipset type, negotiated mode, aperture base/size, and the
 *        max/current AGP memory counters).
 */
334 void agp_copy_info(agp_kern_info
* info
)
336 memset(info
, 0, sizeof(agp_kern_info
));
337 info
->version
.major
= agp_bridge
.version
->major
;
338 info
->version
.minor
= agp_bridge
.version
->minor
;
339 info
->device
= agp_bridge
.dev
;
340 info
->chipset
= agp_bridge
.type
;
341 info
->mode
= agp_bridge
.mode
;
/* Aperture geometry: bus address plus size derived from size_type. */
342 info
->aper_base
= agp_bridge
.gart_bus_addr
;
343 info
->aper_size
= agp_return_size();
344 info
->max_memory
= agp_bridge
.max_memory_agp
;
345 info
->current_memory
= atomic_read(&agp_bridge
.current_memory_agp
);
348 /* End - Routine to copy over information structure */
351 * Routines for handling swapping of agp_memory into the GATT -
352 * These routines take agp_memory and insert them into the GATT.
353 * They call device specific routines to actually write to the GATT.
/*
 * agp_bind_memory - insert an agp_memory block into the GATT.
 * @curr: memory to bind; rejected if NULL or already bound.
 * @pg_start: aperture page offset at which to insert.
 * Flushes CPU caches once per block (is_flushed latch — the
 * cache_flush call itself sits in a missing line), then delegates to
 * the chipset insert_memory hook and records the binding on success.
 * NOTE(review): error returns and the ret_val check are in lines
 * missing from this fragment.
 */
356 int agp_bind_memory(agp_memory
* curr
, off_t pg_start
)
360 if ((curr
== NULL
) || (curr
->is_bound
== TRUE
)) {
/* First bind of this block: make sure caches are coherent once. */
363 if (curr
->is_flushed
== FALSE
) {
365 curr
->is_flushed
= TRUE
;
367 ret_val
= agp_bridge
.insert_memory(curr
, pg_start
, curr
->type
);
/* Record the successful binding for later unbind. */
372 curr
->is_bound
= TRUE
;
373 curr
->pg_start
= pg_start
;
/*
 * agp_unbind_memory - remove an agp_memory block from the GATT.
 * @curr: memory to unbind; rejected if not currently bound.
 * Delegates to the chipset remove_memory hook at the recorded
 * pg_start, then clears is_bound on success.
 * NOTE(review): the NULL-check, error returns and ret_val test are in
 * lines missing from this fragment.
 */
377 int agp_unbind_memory(agp_memory
* curr
)
384 if (curr
->is_bound
!= TRUE
) {
387 ret_val
= agp_bridge
.remove_memory(curr
, curr
->pg_start
, curr
->type
);
392 curr
->is_bound
= FALSE
;
397 /* End - Routines for handling swapping of agp_memory into the GATT */
400 * Driver routines - start
401 * Currently this module supports the
402 * i810, 440lx, 440bx, 440gx, via vp3, via mvp3,
403 * amd irongate, ALi M1541 and generic support for the
407 /* Generic Agp routines - Start */
409 static void agp_generic_agp_enable(u32 mode
)
411 struct pci_dev
*device
= NULL
;
412 u32 command
, scratch
, cap_id
;
415 pci_read_config_dword(agp_bridge
.dev
,
416 agp_bridge
.capndx
+ 4,
420 * PASS1: go throu all devices that claim to be
421 * AGP devices and collect their data.
424 while ((device
= pci_find_class(PCI_CLASS_DISPLAY_VGA
<< 8,
426 pci_read_config_dword(device
, 0x04, &scratch
);
428 if (!(scratch
& 0x00100000))
431 pci_read_config_byte(device
, 0x34, &cap_ptr
);
433 if (cap_ptr
!= 0x00) {
435 pci_read_config_dword(device
,
438 if ((cap_id
& 0xff) != 0x02)
439 cap_ptr
= (cap_id
>> 8) & 0xff;
441 while (((cap_id
& 0xff) != 0x02) && (cap_ptr
!= 0x00));
443 if (cap_ptr
!= 0x00) {
445 * Ok, here we have a AGP device. Disable impossible
446 * settings, and adjust the readqueue to the minimum.
449 pci_read_config_dword(device
, cap_ptr
+ 4, &scratch
);
451 /* adjust RQ depth */
453 ((command
& ~0xff000000) |
454 min((mode
& 0xff000000),
455 min((command
& 0xff000000),
456 (scratch
& 0xff000000))));
458 /* disable SBA if it's not supported */
459 if (!((command
& 0x00000200) &&
460 (scratch
& 0x00000200) &&
461 (mode
& 0x00000200)))
462 command
&= ~0x00000200;
464 /* disable FW if it's not supported */
465 if (!((command
& 0x00000010) &&
466 (scratch
& 0x00000010) &&
467 (mode
& 0x00000010)))
468 command
&= ~0x00000010;
470 if (!((command
& 4) &&
473 command
&= ~0x00000004;
475 if (!((command
& 2) &&
478 command
&= ~0x00000002;
480 if (!((command
& 1) &&
483 command
&= ~0x00000001;
487 * PASS2: Figure out the 4X/2X/1X setting and enable the
488 * target (our motherboard chipset).
492 command
&= ~3; /* 4X */
495 command
&= ~5; /* 2X */
498 command
&= ~6; /* 1X */
500 command
|= 0x00000100;
502 pci_write_config_dword(agp_bridge
.dev
,
503 agp_bridge
.capndx
+ 8,
507 * PASS3: Go throu all AGP devices and update the
511 while ((device
= pci_find_class(PCI_CLASS_DISPLAY_VGA
<< 8,
513 pci_read_config_dword(device
, 0x04, &scratch
);
515 if (!(scratch
& 0x00100000))
518 pci_read_config_byte(device
, 0x34, &cap_ptr
);
520 if (cap_ptr
!= 0x00) {
522 pci_read_config_dword(device
,
525 if ((cap_id
& 0xff) != 0x02)
526 cap_ptr
= (cap_id
>> 8) & 0xff;
528 while (((cap_id
& 0xff) != 0x02) && (cap_ptr
!= 0x00));
531 pci_write_config_dword(device
, cap_ptr
+ 8, command
);
535 static int agp_generic_create_gatt_table(void)
545 /* The generic routines can't handle 2 level gatt's */
546 if (agp_bridge
.size_type
== LVL2_APER_SIZE
) {
551 i
= agp_bridge
.aperture_size_idx
;
552 temp
= agp_bridge
.current_size
;
553 size
= page_order
= num_entries
= 0;
555 if (agp_bridge
.size_type
!= FIXED_APER_SIZE
) {
557 switch (agp_bridge
.size_type
) {
559 size
= A_SIZE_8(temp
)->size
;
561 A_SIZE_8(temp
)->page_order
;
563 A_SIZE_8(temp
)->num_entries
;
566 size
= A_SIZE_16(temp
)->size
;
567 page_order
= A_SIZE_16(temp
)->page_order
;
568 num_entries
= A_SIZE_16(temp
)->num_entries
;
571 size
= A_SIZE_32(temp
)->size
;
572 page_order
= A_SIZE_32(temp
)->page_order
;
573 num_entries
= A_SIZE_32(temp
)->num_entries
;
575 /* This case will never really happen. */
576 case FIXED_APER_SIZE
:
579 size
= page_order
= num_entries
= 0;
583 table
= (char *) __get_free_pages(GFP_KERNEL
,
588 switch (agp_bridge
.size_type
) {
590 agp_bridge
.current_size
= A_IDX8();
593 agp_bridge
.current_size
= A_IDX16();
596 agp_bridge
.current_size
= A_IDX32();
598 /* This case will never really
601 case FIXED_APER_SIZE
:
604 agp_bridge
.current_size
=
605 agp_bridge
.current_size
;
609 agp_bridge
.aperture_size_idx
= i
;
611 } while ((table
== NULL
) &&
612 (i
< agp_bridge
.num_aperture_sizes
));
614 size
= ((aper_size_info_fixed
*) temp
)->size
;
615 page_order
= ((aper_size_info_fixed
*) temp
)->page_order
;
616 num_entries
= ((aper_size_info_fixed
*) temp
)->num_entries
;
617 table
= (char *) __get_free_pages(GFP_KERNEL
, page_order
);
623 table_end
= table
+ ((PAGE_SIZE
* (1 << page_order
)) - 1);
625 for (i
= MAP_NR(table
); i
< MAP_NR(table_end
); i
++) {
626 set_bit(PG_reserved
, &mem_map
[i
].flags
);
629 agp_bridge
.gatt_table_real
= (unsigned long *) table
;
631 agp_bridge
.gatt_table
= ioremap_nocache(virt_to_phys(table
),
632 (PAGE_SIZE
* (1 << page_order
)));
635 if (agp_bridge
.gatt_table
== NULL
) {
636 for (i
= MAP_NR(table
); i
< MAP_NR(table_end
); i
++) {
637 clear_bit(PG_reserved
, &mem_map
[i
].flags
);
640 free_pages((unsigned long) table
, page_order
);
644 agp_bridge
.gatt_bus_addr
= virt_to_phys(agp_bridge
.gatt_table_real
);
646 for (i
= 0; i
< num_entries
; i
++) {
647 agp_bridge
.gatt_table
[i
] =
648 (unsigned long) agp_bridge
.scratch_page
;
654 static int agp_generic_free_gatt_table(void)
658 char *table
, *table_end
;
661 temp
= agp_bridge
.current_size
;
663 switch (agp_bridge
.size_type
) {
665 page_order
= A_SIZE_8(temp
)->page_order
;
668 page_order
= A_SIZE_16(temp
)->page_order
;
671 page_order
= A_SIZE_32(temp
)->page_order
;
673 case FIXED_APER_SIZE
:
674 page_order
= A_SIZE_FIX(temp
)->page_order
;
677 /* The generic routines can't deal with 2 level gatt's */
685 /* Do not worry about freeing memory, because if this is
686 * called, then all agp memory is deallocated and removed
690 iounmap(agp_bridge
.gatt_table
);
691 table
= (char *) agp_bridge
.gatt_table_real
;
692 table_end
= table
+ ((PAGE_SIZE
* (1 << page_order
)) - 1);
694 for (i
= MAP_NR(table
); i
< MAP_NR(table_end
); i
++) {
695 clear_bit(PG_reserved
, &mem_map
[i
].flags
);
698 free_pages((unsigned long) agp_bridge
.gatt_table_real
, page_order
);
702 static int agp_generic_insert_memory(agp_memory
* mem
,
703 off_t pg_start
, int type
)
705 int i
, j
, num_entries
;
708 temp
= agp_bridge
.current_size
;
710 switch (agp_bridge
.size_type
) {
712 num_entries
= A_SIZE_8(temp
)->num_entries
;
715 num_entries
= A_SIZE_16(temp
)->num_entries
;
718 num_entries
= A_SIZE_32(temp
)->num_entries
;
720 case FIXED_APER_SIZE
:
721 num_entries
= A_SIZE_FIX(temp
)->num_entries
;
724 /* The generic routines can't deal with 2 level gatt's */
732 if (type
!= 0 || mem
->type
!= 0) {
733 /* The generic routines know nothing of memory types */
736 if ((pg_start
+ mem
->page_count
) > num_entries
) {
741 while (j
< (pg_start
+ mem
->page_count
)) {
742 if (!PGE_EMPTY(agp_bridge
.gatt_table
[j
])) {
748 if (mem
->is_flushed
== FALSE
) {
750 mem
->is_flushed
= TRUE
;
752 for (i
= 0, j
= pg_start
; i
< mem
->page_count
; i
++, j
++) {
753 agp_bridge
.gatt_table
[j
] = mem
->memory
[i
];
756 agp_bridge
.tlb_flush(mem
);
760 static int agp_generic_remove_memory(agp_memory
* mem
, off_t pg_start
,
765 if (type
!= 0 || mem
->type
!= 0) {
766 /* The generic routines know nothing of memory types */
769 for (i
= pg_start
; i
< (mem
->page_count
+ pg_start
); i
++) {
770 agp_bridge
.gatt_table
[i
] =
771 (unsigned long) agp_bridge
.scratch_page
;
774 agp_bridge
.tlb_flush(mem
);
778 static agp_memory
*agp_generic_alloc_by_type(size_t page_count
, int type
)
783 static void agp_generic_free_by_type(agp_memory
* curr
)
785 if (curr
->memory
!= NULL
) {
788 agp_free_key(curr
->key
);
/*
 * agp_enable - public entry point to enable AGP at the given mode.
 * Thin dispatcher: forwards @mode to the chipset-specific agp_enable
 * hook installed in agp_bridge by the setup routine.
 */
792 void agp_enable(u32 mode
)
794 agp_bridge
.agp_enable(mode
);
797 /* End - Generic Agp routines */
799 #ifdef CONFIG_AGP_I810
800 static aper_size_info_fixed intel_i810_sizes
[] =
803 /* The 32M mode still requires a 64k gatt */
807 #define AGP_DCACHE_MEMORY 1
808 #define AGP_PHYS_MEMORY 2
810 static gatt_mask intel_i810_masks
[] =
813 {(I810_PTE_VALID
| I810_PTE_LOCAL
), AGP_DCACHE_MEMORY
},
817 static struct _intel_i810_private
{
818 struct pci_dev
*i810_dev
; /* device one */
819 volatile u8
*registers
;
820 int num_dcache_entries
;
821 } intel_i810_private
;
823 static int intel_i810_fetch_size(void)
826 aper_size_info_fixed
*values
;
828 pci_read_config_dword(agp_bridge
.dev
, I810_SMRAM_MISCC
, &smram_miscc
);
829 values
= A_SIZE_FIX(agp_bridge
.aperture_sizes
);
831 if ((smram_miscc
& I810_GMS
) == I810_GMS_DISABLE
) {
832 printk(KERN_WARNING PFX
"i810 is disabled\n");
835 if ((smram_miscc
& I810_GFX_MEM_WIN_SIZE
) == I810_GFX_MEM_WIN_32M
) {
836 agp_bridge
.previous_size
=
837 agp_bridge
.current_size
= (void *) (values
+ 1);
838 agp_bridge
.aperture_size_idx
= 1;
839 return values
[1].size
;
841 agp_bridge
.previous_size
=
842 agp_bridge
.current_size
= (void *) (values
);
843 agp_bridge
.aperture_size_idx
= 0;
844 return values
[0].size
;
850 static int intel_i810_configure(void)
852 aper_size_info_fixed
*current_size
;
856 current_size
= A_SIZE_FIX(agp_bridge
.current_size
);
858 pci_read_config_dword(intel_i810_private
.i810_dev
, I810_MMADDR
, &temp
);
861 intel_i810_private
.registers
=
862 (volatile u8
*) ioremap(temp
, 128 * 4096);
864 if ((INREG32(intel_i810_private
.registers
, I810_DRAM_CTL
)
865 & I810_DRAM_ROW_0
) == I810_DRAM_ROW_0_SDRAM
) {
866 /* This will need to be dynamically assigned */
867 printk(KERN_INFO PFX
"detected 4MB dedicated video ram.\n");
868 intel_i810_private
.num_dcache_entries
= 1024;
870 pci_read_config_dword(intel_i810_private
.i810_dev
, I810_GMADDR
, &temp
);
871 agp_bridge
.gart_bus_addr
= (temp
& PCI_BASE_ADDRESS_MEM_MASK
);
872 OUTREG32(intel_i810_private
.registers
, I810_PGETBL_CTL
,
873 agp_bridge
.gatt_bus_addr
| I810_PGETBL_ENABLED
);
876 if (agp_bridge
.needs_scratch_page
== TRUE
) {
877 for (i
= 0; i
< current_size
->num_entries
; i
++) {
878 OUTREG32(intel_i810_private
.registers
,
879 I810_PTE_BASE
+ (i
* 4),
880 agp_bridge
.scratch_page
);
886 static void intel_i810_cleanup(void)
888 OUTREG32(intel_i810_private
.registers
, I810_PGETBL_CTL
, 0);
889 iounmap((void *) intel_i810_private
.registers
);
892 static void intel_i810_tlbflush(agp_memory
* mem
)
897 static void intel_i810_agp_enable(u32 mode
)
902 static int intel_i810_insert_entries(agp_memory
* mem
, off_t pg_start
,
905 int i
, j
, num_entries
;
908 temp
= agp_bridge
.current_size
;
909 num_entries
= A_SIZE_FIX(temp
)->num_entries
;
911 if ((pg_start
+ mem
->page_count
) > num_entries
) {
914 for (j
= pg_start
; j
< (pg_start
+ mem
->page_count
); j
++) {
915 if (!PGE_EMPTY(agp_bridge
.gatt_table
[j
])) {
920 if (type
!= 0 || mem
->type
!= 0) {
921 if ((type
== AGP_DCACHE_MEMORY
) &&
922 (mem
->type
== AGP_DCACHE_MEMORY
)) {
926 i
< (pg_start
+ mem
->page_count
); i
++) {
927 OUTREG32(intel_i810_private
.registers
,
928 I810_PTE_BASE
+ (i
* 4),
929 (i
* 4096) | I810_PTE_LOCAL
|
933 agp_bridge
.tlb_flush(mem
);
936 if((type
== AGP_PHYS_MEMORY
) &&
937 (mem
->type
== AGP_PHYS_MEMORY
)) {
945 for (i
= 0, j
= pg_start
; i
< mem
->page_count
; i
++, j
++) {
946 OUTREG32(intel_i810_private
.registers
,
947 I810_PTE_BASE
+ (j
* 4), mem
->memory
[i
]);
951 agp_bridge
.tlb_flush(mem
);
955 static int intel_i810_remove_entries(agp_memory
* mem
, off_t pg_start
,
960 for (i
= pg_start
; i
< (mem
->page_count
+ pg_start
); i
++) {
961 OUTREG32(intel_i810_private
.registers
,
962 I810_PTE_BASE
+ (i
* 4),
963 agp_bridge
.scratch_page
);
966 agp_bridge
.tlb_flush(mem
);
970 static agp_memory
*intel_i810_alloc_by_type(size_t pg_count
, int type
)
974 if (type
== AGP_DCACHE_MEMORY
) {
975 if (pg_count
!= intel_i810_private
.num_dcache_entries
) {
978 new = agp_create_memory(1);
983 new->type
= AGP_DCACHE_MEMORY
;
984 new->page_count
= pg_count
;
985 new->num_scratch_pages
= 0;
990 if(type
== AGP_PHYS_MEMORY
) {
991 /* The I810 requires a physical address to program
992 * it's mouse pointer into hardware. However the
993 * Xserver still writes to it through the agp
999 new = agp_create_memory(1);
1005 new->memory
[0] = agp_alloc_page();
1007 if (new->memory
[0] == 0) {
1008 /* Free this structure */
1009 agp_free_memory(new);
1013 agp_bridge
.mask_memory(
1014 virt_to_phys((void *) new->memory
[0]),
1016 new->page_count
= 1;
1017 new->num_scratch_pages
= 1;
1018 new->type
= AGP_PHYS_MEMORY
;
1019 new->physical
= virt_to_phys((void *) new->memory
[0]);
1026 static void intel_i810_free_by_type(agp_memory
* curr
)
1028 agp_free_key(curr
->key
);
1029 if(curr
->type
== AGP_PHYS_MEMORY
) {
1030 agp_destroy_page((unsigned long)
1031 phys_to_virt(curr
->memory
[0]));
1032 vfree(curr
->memory
);
/*
 * intel_i810_mask_memory - OR the i810 PTE flag bits into @addr.
 * Unlike the generic chipsets, the i810 selects the mask by @type
 * (e.g. local/dcache vs. normal memory) from agp_bridge.masks.
 */
1038 static unsigned long intel_i810_mask_memory(unsigned long addr
, int type
)
1040 /* Type checking must be done elsewhere */
1041 return addr
| agp_bridge
.masks
[type
].mask
;
1044 static int __init
intel_i810_setup(struct pci_dev
*i810_dev
)
1046 intel_i810_private
.i810_dev
= i810_dev
;
1048 agp_bridge
.masks
= intel_i810_masks
;
1049 agp_bridge
.num_of_masks
= 2;
1050 agp_bridge
.aperture_sizes
= (void *) intel_i810_sizes
;
1051 agp_bridge
.size_type
= FIXED_APER_SIZE
;
1052 agp_bridge
.num_aperture_sizes
= 2;
1053 agp_bridge
.dev_private_data
= (void *) &intel_i810_private
;
1054 agp_bridge
.needs_scratch_page
= TRUE
;
1055 agp_bridge
.configure
= intel_i810_configure
;
1056 agp_bridge
.fetch_size
= intel_i810_fetch_size
;
1057 agp_bridge
.cleanup
= intel_i810_cleanup
;
1058 agp_bridge
.tlb_flush
= intel_i810_tlbflush
;
1059 agp_bridge
.mask_memory
= intel_i810_mask_memory
;
1060 agp_bridge
.agp_enable
= intel_i810_agp_enable
;
1061 agp_bridge
.cache_flush
= global_cache_flush
;
1062 agp_bridge
.create_gatt_table
= agp_generic_create_gatt_table
;
1063 agp_bridge
.free_gatt_table
= agp_generic_free_gatt_table
;
1064 agp_bridge
.insert_memory
= intel_i810_insert_entries
;
1065 agp_bridge
.remove_memory
= intel_i810_remove_entries
;
1066 agp_bridge
.alloc_by_type
= intel_i810_alloc_by_type
;
1067 agp_bridge
.free_by_type
= intel_i810_free_by_type
;
1072 #endif /* CONFIG_AGP_I810 */
1074 #ifdef CONFIG_AGP_INTEL
/*
 * intel_fetch_size - read the aperture size the BIOS programmed.
 * Reads INTEL_APSIZE from config space and scans the bridge's
 * aper_size_info_16 table for a matching size_value; on a hit it
 * records previous/current size and the index, and returns the size
 * in MB.  NOTE(review): the no-match fallthrough (presumably return 0)
 * is in lines missing from this fragment.
 */
1076 static int intel_fetch_size(void)
1080 aper_size_info_16
*values
;
1082 pci_read_config_word(agp_bridge
.dev
, INTEL_APSIZE
, &temp
);
1083 values
= A_SIZE_16(agp_bridge
.aperture_sizes
);
/* Linear search of the supported-sizes table for the BIOS value. */
1085 for (i
= 0; i
< agp_bridge
.num_aperture_sizes
; i
++) {
1086 if (temp
== values
[i
].size_value
) {
1087 agp_bridge
.previous_size
=
1088 agp_bridge
.current_size
= (void *) (values
+ i
);
1089 agp_bridge
.aperture_size_idx
= i
;
1090 return values
[i
].size
;
/*
 * intel_tlbflush - flush the chipset's GART TLB (mem arg unused).
 * Writes 0x2200 then 0x2280 to INTEL_AGPCTRL; presumably the first
 * value pulses the flush bit and the second restores normal operation
 * — TODO confirm against the 440BX datasheet.
 */
1097 static void intel_tlbflush(agp_memory
* mem
)
1099 pci_write_config_dword(agp_bridge
.dev
, INTEL_AGPCTRL
, 0x2200);
1100 pci_write_config_dword(agp_bridge
.dev
, INTEL_AGPCTRL
, 0x2280);
/*
 * intel_cleanup - restore the chipset to its pre-driver state.
 * Clears bit 9 of INTEL_NBXCFG (set by intel_configure()) and writes
 * the saved previous aperture size back to INTEL_APSIZE.
 */
1103 static void intel_cleanup(void)
1106 aper_size_info_16
*previous_size
;
1108 previous_size
= A_SIZE_16(agp_bridge
.previous_size
)
;
1109 pci_read_config_word(agp_bridge
.dev
, INTEL_NBXCFG
, &temp
);
/* Drop the AGP-enable bit (bit 9) set during configure. */
1110 pci_write_config_word(agp_bridge
.dev
, INTEL_NBXCFG
, temp
& ~(1 << 9));
/* Put back the aperture size found at fetch_size time. */
1111 pci_write_config_word(agp_bridge
.dev
, INTEL_APSIZE
,
1112 previous_size
->size_value
);
1115 static int intel_configure(void)
1119 aper_size_info_16
*current_size
;
1121 current_size
= A_SIZE_16(agp_bridge
.current_size
);
1124 pci_write_config_word(agp_bridge
.dev
, INTEL_APSIZE
,
1125 current_size
->size_value
);
1127 /* address to map to */
1128 pci_read_config_dword(agp_bridge
.dev
, INTEL_APBASE
, &temp
);
1129 agp_bridge
.gart_bus_addr
= (temp
& PCI_BASE_ADDRESS_MEM_MASK
);
1131 /* attbase - aperture base */
1132 pci_write_config_dword(agp_bridge
.dev
, INTEL_ATTBASE
,
1133 agp_bridge
.gatt_bus_addr
);
1136 pci_write_config_dword(agp_bridge
.dev
, INTEL_AGPCTRL
, 0x2280);
1139 pci_read_config_word(agp_bridge
.dev
, INTEL_NBXCFG
, &temp2
);
1140 pci_write_config_word(agp_bridge
.dev
, INTEL_NBXCFG
,
1141 (temp2
& ~(1 << 10)) | (1 << 9));
1142 /* clear any possible error conditions */
1143 pci_write_config_byte(agp_bridge
.dev
, INTEL_ERRSTS
+ 1, 7);
1147 static int intel_840_configure(void)
1151 aper_size_info_16
*current_size
;
1153 current_size
= A_SIZE_16(agp_bridge
.current_size
);
1156 pci_write_config_byte(agp_bridge
.dev
, INTEL_APSIZE
,
1157 (char)current_size
->size_value
);
1159 /* address to map to */
1160 pci_read_config_dword(agp_bridge
.dev
, INTEL_APBASE
, &temp
);
1161 agp_bridge
.gart_bus_addr
= (temp
& PCI_BASE_ADDRESS_MEM_MASK
);
1163 /* attbase - aperture base */
1164 pci_write_config_dword(agp_bridge
.dev
, INTEL_ATTBASE
,
1165 agp_bridge
.gatt_bus_addr
);
1168 pci_write_config_dword(agp_bridge
.dev
, INTEL_AGPCTRL
, 0x0000);
1171 pci_read_config_word(agp_bridge
.dev
, INTEL_I840_MCHCFG
, &temp2
);
1172 pci_write_config_word(agp_bridge
.dev
, INTEL_I840_MCHCFG
,
1174 /* clear any possible error conditions */
1175 pci_write_config_word(agp_bridge
.dev
, INTEL_I840_ERRSTS
, 0xc000);
/*
 * intel_mask_memory - OR the chipset PTE flag bits into @addr.
 * Generic Intel path: always uses masks[0]; the @type argument is
 * deliberately ignored.
 */
1179 static unsigned long intel_mask_memory(unsigned long addr
, int type
)
1181 /* Memory type is ignored */
1183 return addr
| agp_bridge
.masks
[0].mask
;
1187 /* Setup function */
1188 static gatt_mask intel_generic_masks
[] =
1193 static aper_size_info_16 intel_generic_sizes
[7] =
1196 {128, 32768, 5, 32},
1204 static int __init
intel_generic_setup (struct pci_dev
*pdev
)
1206 agp_bridge
.masks
= intel_generic_masks
;
1207 agp_bridge
.num_of_masks
= 1;
1208 agp_bridge
.aperture_sizes
= (void *) intel_generic_sizes
;
1209 agp_bridge
.size_type
= U16_APER_SIZE
;
1210 agp_bridge
.num_aperture_sizes
= 7;
1211 agp_bridge
.dev_private_data
= NULL
;
1212 agp_bridge
.needs_scratch_page
= FALSE
;
1213 agp_bridge
.configure
= intel_configure
;
1214 agp_bridge
.fetch_size
= intel_fetch_size
;
1215 agp_bridge
.cleanup
= intel_cleanup
;
1216 agp_bridge
.tlb_flush
= intel_tlbflush
;
1217 agp_bridge
.mask_memory
= intel_mask_memory
;
1218 agp_bridge
.agp_enable
= agp_generic_agp_enable
;
1219 agp_bridge
.cache_flush
= global_cache_flush
;
1220 agp_bridge
.create_gatt_table
= agp_generic_create_gatt_table
;
1221 agp_bridge
.free_gatt_table
= agp_generic_free_gatt_table
;
1222 agp_bridge
.insert_memory
= agp_generic_insert_memory
;
1223 agp_bridge
.remove_memory
= agp_generic_remove_memory
;
1224 agp_bridge
.alloc_by_type
= agp_generic_alloc_by_type
;
1225 agp_bridge
.free_by_type
= agp_generic_free_by_type
;
1229 (void) pdev
; /* unused */
1232 static int __init
intel_840_setup (struct pci_dev
*pdev
)
1234 agp_bridge
.masks
= intel_generic_masks
;
1235 agp_bridge
.num_of_masks
= 1;
1236 agp_bridge
.aperture_sizes
= (void *) intel_generic_sizes
;
1237 agp_bridge
.size_type
= U16_APER_SIZE
;
1238 agp_bridge
.num_aperture_sizes
= 7;
1239 agp_bridge
.dev_private_data
= NULL
;
1240 agp_bridge
.needs_scratch_page
= FALSE
;
1241 agp_bridge
.configure
= intel_840_configure
;
1242 agp_bridge
.fetch_size
= intel_fetch_size
;
1243 agp_bridge
.cleanup
= intel_cleanup
;
1244 agp_bridge
.tlb_flush
= intel_tlbflush
;
1245 agp_bridge
.mask_memory
= intel_mask_memory
;
1246 agp_bridge
.agp_enable
= agp_generic_agp_enable
;
1247 agp_bridge
.cache_flush
= global_cache_flush
;
1248 agp_bridge
.create_gatt_table
= agp_generic_create_gatt_table
;
1249 agp_bridge
.free_gatt_table
= agp_generic_free_gatt_table
;
1250 agp_bridge
.insert_memory
= agp_generic_insert_memory
;
1251 agp_bridge
.remove_memory
= agp_generic_remove_memory
;
1252 agp_bridge
.alloc_by_type
= agp_generic_alloc_by_type
;
1253 agp_bridge
.free_by_type
= agp_generic_free_by_type
;
1257 (void) pdev
; /* unused */
1260 #endif /* CONFIG_AGP_INTEL */
1262 #ifdef CONFIG_AGP_VIA
1264 static int via_fetch_size(void)
1268 aper_size_info_8
*values
;
1270 values
= A_SIZE_8(agp_bridge
.aperture_sizes
);
1271 pci_read_config_byte(agp_bridge
.dev
, VIA_APSIZE
, &temp
);
1272 for (i
= 0; i
< agp_bridge
.num_aperture_sizes
; i
++) {
1273 if (temp
== values
[i
].size_value
) {
1274 agp_bridge
.previous_size
=
1275 agp_bridge
.current_size
= (void *) (values
+ i
);
1276 agp_bridge
.aperture_size_idx
= i
;
1277 return values
[i
].size
;
1284 static int via_configure(void)
1287 aper_size_info_8
*current_size
;
1289 current_size
= A_SIZE_8(agp_bridge
.current_size
);
1291 pci_write_config_byte(agp_bridge
.dev
, VIA_APSIZE
,
1292 current_size
->size_value
);
1293 /* address to map too */
1294 pci_read_config_dword(agp_bridge
.dev
, VIA_APBASE
, &temp
);
1295 agp_bridge
.gart_bus_addr
= (temp
& PCI_BASE_ADDRESS_MEM_MASK
);
1297 /* GART control register */
1298 pci_write_config_dword(agp_bridge
.dev
, VIA_GARTCTRL
, 0x0000000f);
1300 /* attbase - aperture GATT base */
1301 pci_write_config_dword(agp_bridge
.dev
, VIA_ATTBASE
,
1302 (agp_bridge
.gatt_bus_addr
& 0xfffff000) | 3);
/*
 * via_cleanup - restore the VIA chipset to its pre-driver state.
 * Clears the GATT base register (VIA_ATTBASE) and writes the saved
 * previous aperture size back to VIA_APSIZE.
 */
1306 static void via_cleanup(void)
1308 aper_size_info_8
*previous_size
;
1310 previous_size
= A_SIZE_8(agp_bridge
.previous_size
);
/* Disable the translation table by zeroing its base register. */
1311 pci_write_config_dword(agp_bridge
.dev
, VIA_ATTBASE
, 0);
1312 pci_write_config_byte(agp_bridge
.dev
, VIA_APSIZE
,
1313 previous_size
->size_value
);
/*
 * via_tlbflush - flush the VIA GART TLB (mem arg unused).
 * Writes 0x8f then 0x0f to VIA_GARTCTRL; presumably bit 7 pulses the
 * flush and the second write restores the normal control value set in
 * via_configure() — TODO confirm against the VIA datasheet.
 */
1316 static void via_tlbflush(agp_memory
* mem
)
1318 pci_write_config_dword(agp_bridge
.dev
, VIA_GARTCTRL
, 0x0000008f);
1319 pci_write_config_dword(agp_bridge
.dev
, VIA_GARTCTRL
, 0x0000000f);
/*
 * via_mask_memory - OR the chipset PTE flag bits into @addr.
 * Always uses masks[0]; the @type argument is deliberately ignored.
 */
1322 static unsigned long via_mask_memory(unsigned long addr
, int type
)
1324 /* Memory type is ignored */
1326 return addr
| agp_bridge
.masks
[0].mask
;
1329 static aper_size_info_8 via_generic_sizes
[7] =
1332 {128, 32768, 5, 128},
1333 {64, 16384, 4, 192},
1340 static gatt_mask via_generic_masks
[] =
1345 static int __init
via_generic_setup (struct pci_dev
*pdev
)
1347 agp_bridge
.masks
= via_generic_masks
;
1348 agp_bridge
.num_of_masks
= 1;
1349 agp_bridge
.aperture_sizes
= (void *) via_generic_sizes
;
1350 agp_bridge
.size_type
= U8_APER_SIZE
;
1351 agp_bridge
.num_aperture_sizes
= 7;
1352 agp_bridge
.dev_private_data
= NULL
;
1353 agp_bridge
.needs_scratch_page
= FALSE
;
1354 agp_bridge
.configure
= via_configure
;
1355 agp_bridge
.fetch_size
= via_fetch_size
;
1356 agp_bridge
.cleanup
= via_cleanup
;
1357 agp_bridge
.tlb_flush
= via_tlbflush
;
1358 agp_bridge
.mask_memory
= via_mask_memory
;
1359 agp_bridge
.agp_enable
= agp_generic_agp_enable
;
1360 agp_bridge
.cache_flush
= global_cache_flush
;
1361 agp_bridge
.create_gatt_table
= agp_generic_create_gatt_table
;
1362 agp_bridge
.free_gatt_table
= agp_generic_free_gatt_table
;
1363 agp_bridge
.insert_memory
= agp_generic_insert_memory
;
1364 agp_bridge
.remove_memory
= agp_generic_remove_memory
;
1365 agp_bridge
.alloc_by_type
= agp_generic_alloc_by_type
;
1366 agp_bridge
.free_by_type
= agp_generic_free_by_type
;
1370 (void) pdev
; /* unused */
1373 #endif /* CONFIG_AGP_VIA */
1375 #ifdef CONFIG_AGP_SIS
1377 static int sis_fetch_size(void)
1381 aper_size_info_8
*values
;
1383 pci_read_config_byte(agp_bridge
.dev
, SIS_APSIZE
, &temp_size
);
1384 values
= A_SIZE_8(agp_bridge
.aperture_sizes
);
1385 for (i
= 0; i
< agp_bridge
.num_aperture_sizes
; i
++) {
1386 if ((temp_size
== values
[i
].size_value
) ||
1387 ((temp_size
& ~(0x03)) ==
1388 (values
[i
].size_value
& ~(0x03)))) {
1389 agp_bridge
.previous_size
=
1390 agp_bridge
.current_size
= (void *) (values
+ i
);
1392 agp_bridge
.aperture_size_idx
= i
;
1393 return values
[i
].size
;
/*
 * sis_tlbflush - flush the SiS GART TLB (mem arg unused).
 * A single byte write of 0x02 to SIS_TLBFLUSH triggers the flush.
 */
1401 static void sis_tlbflush(agp_memory
* mem
)
1403 pci_write_config_byte(agp_bridge
.dev
, SIS_TLBFLUSH
, 0x02);
1406 static int sis_configure(void)
1409 aper_size_info_8
*current_size
;
1411 current_size
= A_SIZE_8(agp_bridge
.current_size
);
1412 pci_write_config_byte(agp_bridge
.dev
, SIS_TLBCNTRL
, 0x05);
1413 pci_read_config_dword(agp_bridge
.dev
, SIS_APBASE
, &temp
);
1414 agp_bridge
.gart_bus_addr
= (temp
& PCI_BASE_ADDRESS_MEM_MASK
);
1415 pci_write_config_dword(agp_bridge
.dev
, SIS_ATTBASE
,
1416 agp_bridge
.gatt_bus_addr
);
1417 pci_write_config_byte(agp_bridge
.dev
, SIS_APSIZE
,
1418 current_size
->size_value
);
/*
 * sis_cleanup - restore the SiS chipset to its pre-driver state.
 * Writes the saved previous aperture size back to SIS_APSIZE with the
 * low two bits cleared (matching the tolerant compare used in
 * sis_fetch_size(), where those bits are ignored).
 */
1422 static void sis_cleanup(void)
1424 aper_size_info_8
*previous_size
;
1426 previous_size
= A_SIZE_8(agp_bridge
.previous_size
);
1427 pci_write_config_byte(agp_bridge
.dev
, SIS_APSIZE
,
1428 (previous_size
->size_value
& ~(0x03)));
/*
 * sis_mask_memory - OR the chipset PTE flag bits into @addr.
 * Always uses masks[0]; the @type argument is deliberately ignored.
 */
1431 static unsigned long sis_mask_memory(unsigned long addr
, int type
)
1433 /* Memory type is ignored */
1435 return addr
| agp_bridge
.masks
[0].mask
;
/*
 * SiS aperture size table.  Fields appear to be {size in MB, GATT entries,
 * page order, APSIZE register value} — confirm against agp_backend.h.
 * NOTE(review): the array is declared [7] but only two initializers are
 * visible in this copy; restore the remaining five from the pristine source.
 */
1438 static aper_size_info_8 sis_generic_sizes
[7] =
1440 {256, 65536, 6, 99},
1441 {128, 32768, 5, 83},
/*
 * GATT entry mask for SiS; indexed as masks[0] by sis_mask_memory().
 * NOTE(review): the initializer list is missing from this copy — confirm
 * its contents against the pristine source.
 */
1449 static gatt_mask sis_generic_masks
[] =
1454 static int __init
sis_generic_setup (struct pci_dev
*pdev
)
1456 agp_bridge
.masks
= sis_generic_masks
;
1457 agp_bridge
.num_of_masks
= 1;
1458 agp_bridge
.aperture_sizes
= (void *) sis_generic_sizes
;
1459 agp_bridge
.size_type
= U8_APER_SIZE
;
1460 agp_bridge
.num_aperture_sizes
= 7;
1461 agp_bridge
.dev_private_data
= NULL
;
1462 agp_bridge
.needs_scratch_page
= FALSE
;
1463 agp_bridge
.configure
= sis_configure
;
1464 agp_bridge
.fetch_size
= sis_fetch_size
;
1465 agp_bridge
.cleanup
= sis_cleanup
;
1466 agp_bridge
.tlb_flush
= sis_tlbflush
;
1467 agp_bridge
.mask_memory
= sis_mask_memory
;
1468 agp_bridge
.agp_enable
= agp_generic_agp_enable
;
1469 agp_bridge
.cache_flush
= global_cache_flush
;
1470 agp_bridge
.create_gatt_table
= agp_generic_create_gatt_table
;
1471 agp_bridge
.free_gatt_table
= agp_generic_free_gatt_table
;
1472 agp_bridge
.insert_memory
= agp_generic_insert_memory
;
1473 agp_bridge
.remove_memory
= agp_generic_remove_memory
;
1474 agp_bridge
.alloc_by_type
= agp_generic_alloc_by_type
;
1475 agp_bridge
.free_by_type
= agp_generic_free_by_type
;
1480 #endif /* CONFIG_AGP_SIS */
1482 #ifdef CONFIG_AGP_AMD
1484 typedef struct _amd_page_map
{
1485 unsigned long *real
;
1486 unsigned long *remapped
;
1489 static struct _amd_irongate_private
{
1490 volatile u8
*registers
;
1491 amd_page_map
**gatt_pages
;
1493 } amd_irongate_private
;
1495 static int amd_create_page_map(amd_page_map
*page_map
)
1499 page_map
->real
= (unsigned long *) __get_free_page(GFP_KERNEL
);
1500 if (page_map
->real
== NULL
) {
1503 set_bit(PG_reserved
, &mem_map
[MAP_NR(page_map
->real
)].flags
);
1505 page_map
->remapped
= ioremap_nocache(virt_to_phys(page_map
->real
),
1507 if (page_map
->remapped
== NULL
) {
1508 clear_bit(PG_reserved
,
1509 &mem_map
[MAP_NR(page_map
->real
)].flags
);
1510 free_page((unsigned long) page_map
->real
);
1511 page_map
->real
= NULL
;
1516 for(i
= 0; i
< PAGE_SIZE
/ sizeof(unsigned long); i
++) {
1517 page_map
->remapped
[i
] = agp_bridge
.scratch_page
;
1523 static void amd_free_page_map(amd_page_map
*page_map
)
1525 iounmap(page_map
->remapped
);
1526 clear_bit(PG_reserved
,
1527 &mem_map
[MAP_NR(page_map
->real
)].flags
);
1528 free_page((unsigned long) page_map
->real
);
1531 static void amd_free_gatt_pages(void)
1534 amd_page_map
**tables
;
1535 amd_page_map
*entry
;
1537 tables
= amd_irongate_private
.gatt_pages
;
1538 for(i
= 0; i
< amd_irongate_private
.num_tables
; i
++) {
1540 if (entry
!= NULL
) {
1541 if (entry
->real
!= NULL
) {
1542 amd_free_page_map(entry
);
1550 static int amd_create_gatt_pages(int nr_tables
)
1552 amd_page_map
**tables
;
1553 amd_page_map
*entry
;
1557 tables
= kmalloc((nr_tables
+ 1) * sizeof(amd_page_map
*),
1559 if (tables
== NULL
) {
1562 memset(tables
, 0, sizeof(amd_page_map
*) * (nr_tables
+ 1));
1563 for (i
= 0; i
< nr_tables
; i
++) {
1564 entry
= kmalloc(sizeof(amd_page_map
), GFP_KERNEL
);
1565 if (entry
== NULL
) {
1569 memset(entry
, 0, sizeof(amd_page_map
));
1571 retval
= amd_create_page_map(entry
);
1572 if (retval
!= 0) break;
1574 amd_irongate_private
.num_tables
= nr_tables
;
1575 amd_irongate_private
.gatt_pages
= tables
;
1577 if (retval
!= 0) amd_free_gatt_pages();
/*
 * Two-level GATT addressing helpers: a bus address is split into a 4 MB
 * page-directory slot (bits 31:22) and a GATT entry within that page
 * (bits 21:12).  GET_PAGE_DIR_IDX rebases the slot against the start of
 * the aperture so index 0 is the first directory entry we own.
 */
1582 /* Since we don't need contigious memory we just try
1583 * to get the gatt table once
1586 #define GET_PAGE_DIR_OFF(addr) (addr >> 22)
1587 #define GET_PAGE_DIR_IDX(addr) (GET_PAGE_DIR_OFF(addr) - \
1588 GET_PAGE_DIR_OFF(agp_bridge.gart_bus_addr))
1589 #define GET_GATT_OFF(addr) ((addr & 0x003ff000) >> 12)
1590 #define GET_GATT(addr) (amd_irongate_private.gatt_pages[\
1591 GET_PAGE_DIR_IDX(addr)]->remapped)
1593 static int amd_create_gatt_table(void)
1595 aper_size_info_lvl2
*value
;
1596 amd_page_map page_dir
;
1602 value
= A_SIZE_LVL2(agp_bridge
.current_size
);
1603 retval
= amd_create_page_map(&page_dir
);
1608 retval
= amd_create_gatt_pages(value
->num_entries
/ 1024);
1610 amd_free_page_map(&page_dir
);
1614 agp_bridge
.gatt_table_real
= page_dir
.real
;
1615 agp_bridge
.gatt_table
= page_dir
.remapped
;
1616 agp_bridge
.gatt_bus_addr
= virt_to_bus(page_dir
.real
);
1618 /* Get the address for the gart region.
1619 * This is a bus address even on the alpha, b/c its
1620 * used to program the agp master not the cpu
1623 pci_read_config_dword(agp_bridge
.dev
, AMD_APBASE
, &temp
);
1624 addr
= (temp
& PCI_BASE_ADDRESS_MEM_MASK
);
1625 agp_bridge
.gart_bus_addr
= addr
;
1627 /* Calculate the agp offset */
1628 for(i
= 0; i
< value
->num_entries
/ 1024; i
++, addr
+= 0x00400000) {
1629 page_dir
.remapped
[GET_PAGE_DIR_OFF(addr
)] =
1630 virt_to_bus(amd_irongate_private
.gatt_pages
[i
]->real
);
1631 page_dir
.remapped
[GET_PAGE_DIR_OFF(addr
)] |= 0x00000001;
1637 static int amd_free_gatt_table(void)
1639 amd_page_map page_dir
;
1641 page_dir
.real
= agp_bridge
.gatt_table_real
;
1642 page_dir
.remapped
= agp_bridge
.gatt_table
;
1644 amd_free_gatt_pages();
1645 amd_free_page_map(&page_dir
);
1649 static int amd_irongate_fetch_size(void)
1653 aper_size_info_lvl2
*values
;
1655 pci_read_config_dword(agp_bridge
.dev
, AMD_APSIZE
, &temp
);
1656 temp
= (temp
& 0x0000000e);
1657 values
= A_SIZE_LVL2(agp_bridge
.aperture_sizes
);
1658 for (i
= 0; i
< agp_bridge
.num_aperture_sizes
; i
++) {
1659 if (temp
== values
[i
].size_value
) {
1660 agp_bridge
.previous_size
=
1661 agp_bridge
.current_size
= (void *) (values
+ i
);
1663 agp_bridge
.aperture_size_idx
= i
;
1664 return values
[i
].size
;
1671 static int amd_irongate_configure(void)
1673 aper_size_info_lvl2
*current_size
;
1677 current_size
= A_SIZE_LVL2(agp_bridge
.current_size
);
1679 /* Get the memory mapped registers */
1680 pci_read_config_dword(agp_bridge
.dev
, AMD_MMBASE
, &temp
);
1681 temp
= (temp
& PCI_BASE_ADDRESS_MEM_MASK
);
1682 amd_irongate_private
.registers
= (volatile u8
*) ioremap(temp
, 4096);
1684 /* Write out the address of the gatt table */
1685 OUTREG32(amd_irongate_private
.registers
, AMD_ATTBASE
,
1686 agp_bridge
.gatt_bus_addr
);
1688 /* Write the Sync register */
1689 pci_write_config_byte(agp_bridge
.dev
, AMD_MODECNTL
, 0x80);
1691 /* Set indexing mode */
1692 pci_write_config_byte(agp_bridge
.dev
, AMD_MODECNTL2
, 0x00);
1694 /* Write the enable register */
1695 enable_reg
= INREG16(amd_irongate_private
.registers
, AMD_GARTENABLE
);
1696 enable_reg
= (enable_reg
| 0x0004);
1697 OUTREG16(amd_irongate_private
.registers
, AMD_GARTENABLE
, enable_reg
);
1699 /* Write out the size register */
1700 pci_read_config_dword(agp_bridge
.dev
, AMD_APSIZE
, &temp
);
1701 temp
= (((temp
& ~(0x0000000e)) | current_size
->size_value
)
1703 pci_write_config_dword(agp_bridge
.dev
, AMD_APSIZE
, temp
);
1706 OUTREG32(amd_irongate_private
.registers
, AMD_TLBFLUSH
, 0x00000001);
1711 static void amd_irongate_cleanup(void)
1713 aper_size_info_lvl2
*previous_size
;
1717 previous_size
= A_SIZE_LVL2(agp_bridge
.previous_size
);
1719 enable_reg
= INREG16(amd_irongate_private
.registers
, AMD_GARTENABLE
);
1720 enable_reg
= (enable_reg
& ~(0x0004));
1721 OUTREG16(amd_irongate_private
.registers
, AMD_GARTENABLE
, enable_reg
);
1723 /* Write back the previous size and disable gart translation */
1724 pci_read_config_dword(agp_bridge
.dev
, AMD_APSIZE
, &temp
);
1725 temp
= ((temp
& ~(0x0000000f)) | previous_size
->size_value
);
1726 pci_write_config_dword(agp_bridge
.dev
, AMD_APSIZE
, temp
);
1727 iounmap((void *) amd_irongate_private
.registers
);
1731 * This routine could be implemented by taking the addresses
1732 * written to the GATT, and flushing them individually. However
1733 * currently it just flushes the whole table. Which is probably
1734 * more efficent, since agp_memory blocks can be a large number of
1738 static void amd_irongate_tlbflush(agp_memory
* temp
)
1740 OUTREG32(amd_irongate_private
.registers
, AMD_TLBFLUSH
, 0x00000001);
1743 static unsigned long amd_irongate_mask_memory(unsigned long addr
, int type
)
1745 /* Only type 0 is supported by the irongate */
1747 return addr
| agp_bridge
.masks
[0].mask
;
1750 static int amd_insert_memory(agp_memory
* mem
,
1751 off_t pg_start
, int type
)
1753 int i
, j
, num_entries
;
1754 unsigned long *cur_gatt
;
1757 num_entries
= A_SIZE_LVL2(agp_bridge
.current_size
)->num_entries
;
1759 if (type
!= 0 || mem
->type
!= 0) {
1762 if ((pg_start
+ mem
->page_count
) > num_entries
) {
1767 while (j
< (pg_start
+ mem
->page_count
)) {
1768 addr
= (j
* PAGE_SIZE
) + agp_bridge
.gart_bus_addr
;
1769 cur_gatt
= GET_GATT(addr
);
1770 if (!PGE_EMPTY(cur_gatt
[GET_GATT_OFF(addr
)])) {
1776 if (mem
->is_flushed
== FALSE
) {
1778 mem
->is_flushed
= TRUE
;
1781 for (i
= 0, j
= pg_start
; i
< mem
->page_count
; i
++, j
++) {
1782 addr
= (j
* PAGE_SIZE
) + agp_bridge
.gart_bus_addr
;
1783 cur_gatt
= GET_GATT(addr
);
1784 cur_gatt
[GET_GATT_OFF(addr
)] = mem
->memory
[i
];
1786 agp_bridge
.tlb_flush(mem
);
1790 static int amd_remove_memory(agp_memory
* mem
, off_t pg_start
,
1794 unsigned long *cur_gatt
;
1797 if (type
!= 0 || mem
->type
!= 0) {
1800 for (i
= pg_start
; i
< (mem
->page_count
+ pg_start
); i
++) {
1801 addr
= (i
* PAGE_SIZE
) + agp_bridge
.gart_bus_addr
;
1802 cur_gatt
= GET_GATT(addr
);
1803 cur_gatt
[GET_GATT_OFF(addr
)] =
1804 (unsigned long) agp_bridge
.scratch_page
;
1807 agp_bridge
.tlb_flush(mem
);
1811 static aper_size_info_lvl2 amd_irongate_sizes
[7] =
1813 {2048, 524288, 0x0000000c},
1814 {1024, 262144, 0x0000000a},
1815 {512, 131072, 0x00000008},
1816 {256, 65536, 0x00000006},
1817 {128, 32768, 0x00000004},
1818 {64, 16384, 0x00000002},
1819 {32, 8192, 0x00000000}
/*
 * GATT entry mask for the Irongate; indexed as masks[0] by
 * amd_irongate_mask_memory().  NOTE(review): the initializer list is
 * missing from this copy — confirm against the pristine source.
 */
1822 static gatt_mask amd_irongate_masks
[] =
1827 static int __init
amd_irongate_setup (struct pci_dev
*pdev
)
1829 agp_bridge
.masks
= amd_irongate_masks
;
1830 agp_bridge
.num_of_masks
= 1;
1831 agp_bridge
.aperture_sizes
= (void *) amd_irongate_sizes
;
1832 agp_bridge
.size_type
= LVL2_APER_SIZE
;
1833 agp_bridge
.num_aperture_sizes
= 7;
1834 agp_bridge
.dev_private_data
= (void *) &amd_irongate_private
;
1835 agp_bridge
.needs_scratch_page
= FALSE
;
1836 agp_bridge
.configure
= amd_irongate_configure
;
1837 agp_bridge
.fetch_size
= amd_irongate_fetch_size
;
1838 agp_bridge
.cleanup
= amd_irongate_cleanup
;
1839 agp_bridge
.tlb_flush
= amd_irongate_tlbflush
;
1840 agp_bridge
.mask_memory
= amd_irongate_mask_memory
;
1841 agp_bridge
.agp_enable
= agp_generic_agp_enable
;
1842 agp_bridge
.cache_flush
= global_cache_flush
;
1843 agp_bridge
.create_gatt_table
= amd_create_gatt_table
;
1844 agp_bridge
.free_gatt_table
= amd_free_gatt_table
;
1845 agp_bridge
.insert_memory
= amd_insert_memory
;
1846 agp_bridge
.remove_memory
= amd_remove_memory
;
1847 agp_bridge
.alloc_by_type
= agp_generic_alloc_by_type
;
1848 agp_bridge
.free_by_type
= agp_generic_free_by_type
;
1852 (void) pdev
; /* unused */
1855 #endif /* CONFIG_AGP_AMD */
1857 #ifdef CONFIG_AGP_ALI
1859 static int ali_fetch_size(void)
1863 aper_size_info_32
*values
;
1865 pci_read_config_dword(agp_bridge
.dev
, ALI_ATTBASE
, &temp
);
1866 temp
&= ~(0xfffffff0);
1867 values
= A_SIZE_32(agp_bridge
.aperture_sizes
);
1869 for (i
= 0; i
< agp_bridge
.num_aperture_sizes
; i
++) {
1870 if (temp
== values
[i
].size_value
) {
1871 agp_bridge
.previous_size
=
1872 agp_bridge
.current_size
= (void *) (values
+ i
);
1873 agp_bridge
.aperture_size_idx
= i
;
1874 return values
[i
].size
;
1881 static void ali_tlbflush(agp_memory
* mem
)
1885 pci_read_config_dword(agp_bridge
.dev
, ALI_TLBCTRL
, &temp
);
1886 pci_write_config_dword(agp_bridge
.dev
, ALI_TLBCTRL
,
1887 ((temp
& 0xffffff00) | 0x00000090));
1888 pci_write_config_dword(agp_bridge
.dev
, ALI_TLBCTRL
,
1889 ((temp
& 0xffffff00) | 0x00000010));
1892 static void ali_cleanup(void)
1894 aper_size_info_32
*previous_size
;
1897 previous_size
= A_SIZE_32(agp_bridge
.previous_size
);
1899 pci_read_config_dword(agp_bridge
.dev
, ALI_TLBCTRL
, &temp
);
1900 pci_write_config_dword(agp_bridge
.dev
, ALI_TLBCTRL
,
1901 ((temp
& 0xffffff00) | 0x00000090));
1902 pci_write_config_dword(agp_bridge
.dev
, ALI_ATTBASE
,
1903 previous_size
->size_value
);
1906 static int ali_configure(void)
1909 aper_size_info_32
*current_size
;
1911 current_size
= A_SIZE_32(agp_bridge
.current_size
);
1913 /* aperture size and gatt addr */
1914 pci_write_config_dword(agp_bridge
.dev
, ALI_ATTBASE
,
1915 agp_bridge
.gatt_bus_addr
| current_size
->size_value
);
1918 pci_read_config_dword(agp_bridge
.dev
, ALI_TLBCTRL
, &temp
);
1919 pci_write_config_dword(agp_bridge
.dev
, ALI_TLBCTRL
,
1920 ((temp
& 0xffffff00) | 0x00000010));
1922 /* address to map to */
1923 pci_read_config_dword(agp_bridge
.dev
, ALI_APBASE
, &temp
);
1924 agp_bridge
.gart_bus_addr
= (temp
& PCI_BASE_ADDRESS_MEM_MASK
);
1928 static unsigned long ali_mask_memory(unsigned long addr
, int type
)
1930 /* Memory type is ignored */
1932 return addr
| agp_bridge
.masks
[0].mask
;
/*
 * GATT entry mask for ALi; indexed as masks[0] by ali_mask_memory().
 * NOTE(review): the initializer list is missing from this copy — confirm
 * against the pristine source.
 */
1936 /* Setup function */
1937 static gatt_mask ali_generic_masks
[] =
/*
 * ALi aperture size table; fields appear to be {size in MB, GATT entries,
 * page order, ATTBASE size code}.  NOTE(review): declared [7] but only one
 * initializer survives in this copy — restore the rest from the pristine
 * source.
 */
1942 static aper_size_info_32 ali_generic_sizes
[7] =
1944 {256, 65536, 6, 10},
1953 static int __init
ali_generic_setup (struct pci_dev
*pdev
)
1955 agp_bridge
.masks
= ali_generic_masks
;
1956 agp_bridge
.num_of_masks
= 1;
1957 agp_bridge
.aperture_sizes
= (void *) ali_generic_sizes
;
1958 agp_bridge
.size_type
= U32_APER_SIZE
;
1959 agp_bridge
.num_aperture_sizes
= 7;
1960 agp_bridge
.dev_private_data
= NULL
;
1961 agp_bridge
.needs_scratch_page
= FALSE
;
1962 agp_bridge
.configure
= ali_configure
;
1963 agp_bridge
.fetch_size
= ali_fetch_size
;
1964 agp_bridge
.cleanup
= ali_cleanup
;
1965 agp_bridge
.tlb_flush
= ali_tlbflush
;
1966 agp_bridge
.mask_memory
= ali_mask_memory
;
1967 agp_bridge
.agp_enable
= agp_generic_agp_enable
;
1968 agp_bridge
.cache_flush
= global_cache_flush
;
1969 agp_bridge
.create_gatt_table
= agp_generic_create_gatt_table
;
1970 agp_bridge
.free_gatt_table
= agp_generic_free_gatt_table
;
1971 agp_bridge
.insert_memory
= agp_generic_insert_memory
;
1972 agp_bridge
.remove_memory
= agp_generic_remove_memory
;
1973 agp_bridge
.alloc_by_type
= agp_generic_alloc_by_type
;
1974 agp_bridge
.free_by_type
= agp_generic_free_by_type
;
1978 (void) pdev
; /* unused */
1981 #endif /* CONFIG_AGP_ALI */
/*
 * Table of known AGP host bridges keyed by (vendor_id, device_id), walked
 * by agp_lookup_host_bridge().  A device_id of 0 marks a per-vendor
 * "generic" fallback entry, used only when the user passes
 * agp_try_unsupported=1.
 * NOTE(review): many entries in this copy have lost fields (vendor_id,
 * chipset enum constant, vendor/chipset name strings); restore them from
 * the pristine source — only the setup-function fields survived intact.
 */
1984 /* per-chipset initialization data.
1985 * note -- all chipsets for a single vendor MUST be grouped together
1988 unsigned short device_id
; /* first, to make table easier to read */
1989 unsigned short vendor_id
;
1990 enum chipset_type chipset
;
1991 const char *vendor_name
;
1992 const char *chipset_name
;
1993 int (*chipset_setup
) (struct pci_dev
*pdev
);
1994 } agp_bridge_info
[] __initdata
= {
1996 #ifdef CONFIG_AGP_ALI
1997 { PCI_DEVICE_ID_AL_M1541_0
,
2002 ali_generic_setup
},
2008 ali_generic_setup
},
2009 #endif /* CONFIG_AGP_ALI */
2011 #ifdef CONFIG_AGP_AMD
2012 { PCI_DEVICE_ID_AMD_IRONGATE_0
,
2017 amd_irongate_setup
},
2023 amd_irongate_setup
},
2024 #endif /* CONFIG_AGP_AMD */
2026 #ifdef CONFIG_AGP_INTEL
2027 { PCI_DEVICE_ID_INTEL_82443LX_0
,
2028 PCI_VENDOR_ID_INTEL
,
2032 intel_generic_setup
},
2033 { PCI_DEVICE_ID_INTEL_82443BX_0
,
2034 PCI_VENDOR_ID_INTEL
,
2038 intel_generic_setup
},
2039 { PCI_DEVICE_ID_INTEL_82443GX_0
,
2040 PCI_VENDOR_ID_INTEL
,
2044 intel_generic_setup
},
2045 { PCI_DEVICE_ID_INTEL_840_0
,
2046 PCI_VENDOR_ID_INTEL
,
2052 PCI_VENDOR_ID_INTEL
,
2056 intel_generic_setup
},
2057 #endif /* CONFIG_AGP_INTEL */
2059 #ifdef CONFIG_AGP_SIS
2060 { PCI_DEVICE_ID_SI_630
,
2065 sis_generic_setup
},
2066 { PCI_DEVICE_ID_SI_540
,
2071 sis_generic_setup
},
2072 { PCI_DEVICE_ID_SI_620
,
2077 sis_generic_setup
},
2078 { PCI_DEVICE_ID_SI_530
,
2083 sis_generic_setup
},
2084 { PCI_DEVICE_ID_SI_630
,
2089 sis_generic_setup
},
2090 { PCI_DEVICE_ID_SI_540
,
2095 sis_generic_setup
},
2096 { PCI_DEVICE_ID_SI_620
,
2101 sis_generic_setup
},
2102 { PCI_DEVICE_ID_SI_530
,
2107 sis_generic_setup
},
2113 sis_generic_setup
},
2114 #endif /* CONFIG_AGP_SIS */
2116 #ifdef CONFIG_AGP_VIA
2117 { PCI_DEVICE_ID_VIA_8371_0
,
2122 via_generic_setup
},
2123 { PCI_DEVICE_ID_VIA_8501_0
,
2128 via_generic_setup
},
2129 { PCI_DEVICE_ID_VIA_82C597_0
,
2134 via_generic_setup
},
2135 { PCI_DEVICE_ID_VIA_82C598_0
,
2140 via_generic_setup
},
2141 { PCI_DEVICE_ID_VIA_82C691_0
,
2146 via_generic_setup
},
2152 via_generic_setup
},
2153 #endif /* CONFIG_AGP_VIA */
2155 { 0, }, /* dummy final entry, always present */
2159 /* scan table above for supported devices */
2160 static int __init
agp_lookup_host_bridge (struct pci_dev
*pdev
)
2164 for (i
= 0; i
< arraysize (agp_bridge_info
); i
++)
2165 if (pdev
->vendor
== agp_bridge_info
[i
].vendor_id
)
2168 if (i
>= arraysize (agp_bridge_info
)) {
2169 printk (KERN_DEBUG PFX
"unsupported bridge\n");
2173 while ((i
< arraysize (agp_bridge_info
)) &&
2174 (agp_bridge_info
[i
].vendor_id
== pdev
->vendor
)) {
2175 if (pdev
->device
== agp_bridge_info
[i
].device_id
) {
2176 printk (KERN_INFO PFX
"Detected %s %s chipset\n",
2177 agp_bridge_info
[i
].vendor_name
,
2178 agp_bridge_info
[i
].chipset_name
);
2179 agp_bridge
.type
= agp_bridge_info
[i
].chipset
;
2180 return agp_bridge_info
[i
].chipset_setup (pdev
);
2186 i
--; /* point to vendor generic entry (device_id == 0) */
2188 /* try init anyway, if user requests it AND
2189 * there is a 'generic' bridge entry for this vendor */
2190 if (agp_try_unsupported
&& agp_bridge_info
[i
].device_id
== 0) {
2191 printk(KERN_WARNING PFX
"Trying generic %s routines"
2192 " for device id: %04x\n",
2193 agp_bridge_info
[i
].vendor_name
, pdev
->device
);
2194 agp_bridge
.type
= agp_bridge_info
[i
].chipset
;
2195 return agp_bridge_info
[i
].chipset_setup (pdev
);
2198 printk(KERN_ERR PFX
"Unsupported %s chipset (device id: %04x),"
2199 " you might want to try agp_try_unsupported=1.\n",
2200 agp_bridge_info
[i
].vendor_name
, pdev
->device
);
2205 /* Supported Device Scanning routine */
2207 static int __init
agp_find_supported_device(void)
2209 struct pci_dev
*dev
= NULL
;
2211 u32 cap_id
, scratch
;
2213 if ((dev
= pci_find_class(PCI_CLASS_BRIDGE_HOST
<< 8, NULL
)) == NULL
)
2216 agp_bridge
.dev
= dev
;
2218 /* Need to test for I810 here */
2219 #ifdef CONFIG_AGP_I810
2220 if (dev
->vendor
== PCI_VENDOR_ID_INTEL
) {
2221 struct pci_dev
*i810_dev
;
2223 switch (dev
->device
) {
2224 case PCI_DEVICE_ID_INTEL_810_0
:
2225 i810_dev
= pci_find_device(PCI_VENDOR_ID_INTEL
,
2226 PCI_DEVICE_ID_INTEL_810_1
,
2228 if (i810_dev
== NULL
) {
2229 printk(KERN_ERR PFX
"Detected an Intel i810,"
2230 " but could not find the secondary"
2234 printk(KERN_INFO PFX
"Detected an Intel "
2236 agp_bridge
.type
= INTEL_I810
;
2237 return intel_i810_setup (i810_dev
);
2239 case PCI_DEVICE_ID_INTEL_810_DC100_0
:
2240 i810_dev
= pci_find_device(PCI_VENDOR_ID_INTEL
,
2241 PCI_DEVICE_ID_INTEL_810_DC100_1
,
2243 if (i810_dev
== NULL
) {
2244 printk(KERN_ERR PFX
"Detected an Intel i810 "
2245 "DC100, but could not find the "
2246 "secondary device.\n");
2249 printk(KERN_INFO PFX
"Detected an Intel i810 "
2250 "DC100 Chipset.\n");
2251 agp_bridge
.type
= INTEL_I810
;
2252 return intel_i810_setup(i810_dev
);
2254 case PCI_DEVICE_ID_INTEL_810_E_0
:
2255 i810_dev
= pci_find_device(PCI_VENDOR_ID_INTEL
,
2256 PCI_DEVICE_ID_INTEL_810_E_1
,
2258 if (i810_dev
== NULL
) {
2259 printk(KERN_ERR PFX
"Detected an Intel i810 E"
2260 ", but could not find the secondary "
2264 printk(KERN_INFO PFX
"Detected an Intel i810 E "
2266 agp_bridge
.type
= INTEL_I810
;
2267 return intel_i810_setup(i810_dev
);
2273 #endif /* CONFIG_AGP_I810 */
2276 pci_read_config_dword(dev
, 0x04, &scratch
);
2277 if (!(scratch
& 0x00100000))
2280 pci_read_config_byte(dev
, 0x34, &cap_ptr
);
2281 if (cap_ptr
!= 0x00) {
2283 pci_read_config_dword(dev
, cap_ptr
, &cap_id
);
2285 if ((cap_id
& 0xff) != 0x02)
2286 cap_ptr
= (cap_id
>> 8) & 0xff;
2288 while (((cap_id
& 0xff) != 0x02) && (cap_ptr
!= 0x00));
2290 if (cap_ptr
== 0x00)
2292 agp_bridge
.capndx
= cap_ptr
;
2294 /* Fill in the mode register */
2295 pci_read_config_dword(agp_bridge
.dev
,
2296 agp_bridge
.capndx
+ 4,
2299 /* probe for known chipsets */
2300 return agp_lookup_host_bridge (dev
);
/*
 * Piecewise-linear table used by agp_find_max() to interpolate how much
 * main memory (in MB) may be used for AGP, as a function of installed RAM.
 * NOTE(review): the struct fields and all nine table rows are missing from
 * this copy (agp_find_max() reads .mem and .agp members) — restore from
 * the pristine source.
 */
2303 struct agp_max_table
{
2308 static struct agp_max_table maxes_table
[9] __initdata
=
2321 static int __init
agp_find_max (void)
2323 long memory
, index
, result
;
2325 memory
= virt_to_phys(high_memory
) >> 20;
2328 while ((memory
> maxes_table
[index
].mem
) &&
2333 result
= maxes_table
[index
- 1].agp
+
2334 ( (memory
- maxes_table
[index
- 1].mem
) *
2335 (maxes_table
[index
].agp
- maxes_table
[index
- 1].agp
)) /
2336 (maxes_table
[index
].mem
- maxes_table
[index
- 1].mem
);
2338 printk(KERN_INFO PFX
"Maximum main memory to use "
2339 "for agp memory: %ldM\n", result
);
2340 result
= result
<< (20 - PAGE_SHIFT
);
2344 #define AGPGART_VERSION_MAJOR 0
2345 #define AGPGART_VERSION_MINOR 99
2347 static agp_version agp_current_version
=
2349 AGPGART_VERSION_MAJOR
,
2350 AGPGART_VERSION_MINOR
2353 static int __init
agp_backend_initialize(void)
2355 int size_value
, rc
, got_gatt
=0, got_keylist
=0;
2357 memset(&agp_bridge
, 0, sizeof(struct agp_bridge_data
));
2358 agp_bridge
.type
= NOT_SUPPORTED
;
2359 agp_bridge
.max_memory_agp
= agp_find_max();
2360 agp_bridge
.version
= &agp_current_version
;
2362 rc
= agp_find_supported_device();
2364 /* not KERN_ERR because error msg should have already printed */
2365 printk(KERN_DEBUG PFX
"no supported devices found.\n");
2369 if (agp_bridge
.needs_scratch_page
== TRUE
) {
2370 agp_bridge
.scratch_page
= agp_alloc_page();
2372 if (agp_bridge
.scratch_page
== 0) {
2373 printk(KERN_ERR PFX
"unable to get memory for "
2377 agp_bridge
.scratch_page
=
2378 virt_to_phys((void *) agp_bridge
.scratch_page
);
2379 agp_bridge
.scratch_page
=
2380 agp_bridge
.mask_memory(agp_bridge
.scratch_page
, 0);
2383 size_value
= agp_bridge
.fetch_size();
2385 if (size_value
== 0) {
2386 printk(KERN_ERR PFX
"unable to detrimine aperture size.\n");
2390 if (agp_bridge
.create_gatt_table()) {
2391 printk(KERN_ERR PFX
"unable to get memory for graphics "
2392 "translation table.\n");
2398 agp_bridge
.key_list
= vmalloc(PAGE_SIZE
* 4);
2399 if (agp_bridge
.key_list
== NULL
) {
2400 printk(KERN_ERR PFX
"error allocating memory for key lists.\n");
2406 /* FIXME vmalloc'd memory not guaranteed contiguous */
2407 memset(agp_bridge
.key_list
, 0, PAGE_SIZE
* 4);
2409 if (agp_bridge
.configure()) {
2410 printk(KERN_ERR PFX
"error configuring host chipset.\n");
2415 printk(KERN_INFO PFX
"AGP aperture is %dM @ 0x%lx\n",
2416 size_value
, agp_bridge
.gart_bus_addr
);
2421 if (agp_bridge
.needs_scratch_page
== TRUE
) {
2422 agp_bridge
.scratch_page
&= ~(0x00000fff);
2423 agp_destroy_page((unsigned long)
2424 phys_to_virt(agp_bridge
.scratch_page
));
2427 agp_bridge
.free_gatt_table();
2429 vfree(agp_bridge
.key_list
);
2434 /* cannot be __exit b/c as it could be called from __init code */
2435 static void agp_backend_cleanup(void)
2437 agp_bridge
.cleanup();
2438 agp_bridge
.free_gatt_table();
2439 vfree(agp_bridge
.key_list
);
2441 if (agp_bridge
.needs_scratch_page
== TRUE
) {
2442 agp_bridge
.scratch_page
&= ~(0x00000fff);
2443 agp_destroy_page((unsigned long)
2444 phys_to_virt(agp_bridge
.scratch_page
));
2448 extern int agp_frontend_initialize(void);
2449 extern void agp_frontend_cleanup(void);
2451 static int __init
agp_init(void)
2455 printk(KERN_INFO
"Linux agpgart interface v%d.%d (c) Jeff Hartmann\n",
2456 AGPGART_VERSION_MAJOR
, AGPGART_VERSION_MINOR
);
2458 ret_val
= agp_backend_initialize();
2462 ret_val
= agp_frontend_initialize();
2464 agp_backend_cleanup();
2471 static void __exit
agp_cleanup(void)
2473 agp_frontend_cleanup();
2474 agp_backend_cleanup();
/* Register module entry and exit points with the kernel module loader. */
2477 module_init(agp_init
);
2478 module_exit(agp_cleanup
);