2 * AGPGART module version 0.99
3 * Copyright (C) 1999 Jeff Hartmann
4 * Copyright (C) 1999 Precision Insight, Inc.
5 * Copyright (C) 1999 Xi Graphics, Inc.
7 * Permission is hereby granted, free of charge, to any person obtaining a
8 * copy of this software and associated documentation files (the "Software"),
9 * to deal in the Software without restriction, including without limitation
10 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
11 * and/or sell copies of the Software, and to permit persons to whom the
12 * Software is furnished to do so, subject to the following conditions:
14 * The above copyright notice and this permission notice shall be included
15 * in all copies or substantial portions of the Software.
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
18 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * JEFF HARTMANN, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM,
21 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
22 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
23 * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26 #include <linux/config.h>
27 #include <linux/version.h>
28 #include <linux/module.h>
29 #include <linux/types.h>
30 #include <linux/kernel.h>
31 #include <linux/sched.h>
33 #include <linux/string.h>
34 #include <linux/errno.h>
35 #include <linux/malloc.h>
36 #include <linux/vmalloc.h>
37 #include <linux/pci.h>
38 #include <linux/init.h>
39 #include <linux/pagemap.h>
40 #include <linux/miscdevice.h>
41 #include <asm/system.h>
42 #include <asm/uaccess.h>
46 #include <linux/agp_backend.h>
/*
 * Module boilerplate: author tag, the "agp_try_unsupported" load-time
 * parameter (single int), and the exported in-kernel API used by AGP
 * clients (e.g. DRM).  agp_bridge is the single global backend instance.
 * NOTE(review): this extract drops source lines throughout (see gaps in
 * the embedded original line numbers); statements split across lines
 * below are artifacts of the extraction, not of the original file.
 */
49 MODULE_AUTHOR("Jeff Hartmann <jhartmann@precisioninsight.com>");
50 MODULE_PARM(agp_try_unsupported
, "1i");
51 EXPORT_SYMBOL(agp_free_memory
);
52 EXPORT_SYMBOL(agp_allocate_memory
);
53 EXPORT_SYMBOL(agp_copy_info
);
54 EXPORT_SYMBOL(agp_bind_memory
);
55 EXPORT_SYMBOL(agp_unbind_memory
);
56 EXPORT_SYMBOL(agp_enable
);
57 EXPORT_SYMBOL(agp_backend_acquire
);
58 EXPORT_SYMBOL(agp_backend_release
);
/* Forward declaration plus the global bridge state and module parameter. */
60 static void flush_cache(void);
62 static struct agp_bridge_data agp_bridge
;
63 static int agp_try_unsupported __initdata
= 0;
/*
 * flush_cache() - write back and invalidate CPU caches so GART-mapped
 * pages are coherent with main memory.  On x86 this is WBINVD; builds on
 * architectures with no definition fail via #error.
 * NOTE(review): the #if/#else/#endif arms selecting per-arch code are
 * missing from this extract (original line-number gaps), so the fragment
 * is not preprocessor-balanced as shown.
 */
66 static inline void flush_cache(void)
69 asm volatile ("wbinvd":::"memory");
70 #elif defined(__alpha__) || defined(__ia64__)
71 /* ??? I wonder if we'll really need to flush caches, or if the
72 core logic can manage to keep the system coherent. The ARM
73 speaks only of using `cflush' to get things in memory in
74 preparation for power failure.
76 If we do need to call `cflush', we'll need a target page,
77 as we can only flush one page at a time.
79 Ditto for IA-64. --davidm 00/08/07 */
82 #error "Please define flush_cache."
/*
 * SMP rendezvous state: cpus_waiting counts CPUs that still have to run
 * the cache flush.  ipi_handler() executes on each remote CPU: it
 * decrements the counter and spins until every CPU has checked in.
 * NOTE(review): the flush_cache() call and loop body inside the handler
 * are among the lines missing from this extract.
 */
87 static atomic_t cpus_waiting
;
89 static void ipi_handler(void *null
)
92 atomic_dec(&cpus_waiting
);
93 while (atomic_read(&cpus_waiting
) > 0)
/*
 * smp_flush_cache() - flush caches on every CPU.  Arms cpus_waiting with
 * (number of CPUs - 1), sends an IPI to the other CPUs via
 * smp_call_function(), panics on timeout, then busy-waits until all
 * remote CPUs have completed.  The local CPU's own flush is in one of
 * the lines missing from this extract.
 */
97 static void smp_flush_cache(void)
99 atomic_set(&cpus_waiting
, smp_num_cpus
- 1);
100 if (smp_call_function(ipi_handler
, NULL
, 1, 0) != 0)
101 panic(PFX
"timed out waiting for the other CPUs!\n");
103 while (atomic_read(&cpus_waiting
) > 0)
106 #define global_cache_flush smp_flush_cache
107 #else /* CONFIG_SMP */
108 #define global_cache_flush flush_cache
109 #endif /* CONFIG_SMP */
/*
 * agp_backend_acquire() - claim exclusive use of the AGP backend.
 * Fails when no supported bridge is present; otherwise increments the
 * agp_in_use counter and backs the increment out again if another
 * client already held the backend (counter != 1 after the increment).
 * NOTE(review): braces and the return statements (error codes / 0) are
 * in lines missing from this extract.
 */
111 int agp_backend_acquire(void)
113 if (agp_bridge
.type
== NOT_SUPPORTED
) {
116 atomic_inc(&agp_bridge
.agp_in_use
);
118 if (atomic_read(&agp_bridge
.agp_in_use
) != 1) {
119 atomic_dec(&agp_bridge
.agp_in_use
);
/*
 * agp_backend_release() - release the backend claimed by
 * agp_backend_acquire(): no-op when unsupported, otherwise drop the
 * agp_in_use counter.
 */
126 void agp_backend_release(void)
128 if (agp_bridge
.type
== NOT_SUPPORTED
) {
131 atomic_dec(&agp_bridge
.agp_in_use
);
136 * Basic Page Allocation Routines -
137 * These routines handle page allocation
138 * and by default they reserve the allocated
139 * memory. They also handle incrementing the
140 * current_memory_agp value, Which is checked
141 * against a maximum value.
/*
 * agp_alloc_page() - allocate one kernel page for GART use.
 * Takes an extra reference and sets PG_locked on the page so the VM
 * leaves it alone, bumps the global current_memory_agp accounting, and
 * returns the page's kernel virtual address (as unsigned long).
 * NOTE(review): the NULL-return check on __get_free_page() is in lines
 * missing from this extract.
 */
144 static unsigned long agp_alloc_page(void)
148 pt
= (void *) __get_free_page(GFP_KERNEL
);
152 atomic_inc(&virt_to_page(pt
)->count
);
153 set_bit(PG_locked
, &virt_to_page(pt
)->flags
);
154 atomic_inc(&agp_bridge
.current_memory_agp
);
155 return (unsigned long) pt
;
/*
 * agp_destroy_page() - undo agp_alloc_page() for one page (given by its
 * kernel virtual address): drop the extra reference, clear PG_locked,
 * wake any waiters on the page, free it, and decrement the
 * current_memory_agp accounting.
 */
158 static void agp_destroy_page(unsigned long page
)
160 void *pt
= (void *) page
;
165 atomic_dec(&virt_to_page(pt
)->count
);
166 clear_bit(PG_locked
, &virt_to_page(pt
)->flags
);
167 wake_up(&virt_to_page(pt
)->wait
);
168 free_page((unsigned long) pt
);
169 atomic_dec(&agp_bridge
.current_memory_agp
);
172 /* End Basic Page Allocation Routines */
175 * Generic routines for handling agp_memory structures -
176 * They use the basic page allocation routines to do the
/*
 * agp_free_key() - return a key to the bridge's key bitmap so it can be
 * handed out again by agp_get_key().  Range checks on 'key' are in
 * lines missing from this extract.
 */
181 static void agp_free_key(int key
)
188 clear_bit(key
, agp_bridge
.key_list
);
/*
 * agp_get_key() - allocate a unique small-integer key for an agp_memory
 * block: find the first clear bit in the bridge's key bitmap (bounded by
 * MAXKEY), mark it used, and return it.  The return statement is in a
 * line missing from this extract.
 */
192 static int agp_get_key(void)
196 bit
= find_first_zero_bit(agp_bridge
.key_list
, MAXKEY
);
198 set_bit(bit
, agp_bridge
.key_list
);
/*
 * agp_create_memory() - allocate and zero an agp_memory descriptor,
 * assign it a key, and vmalloc a scratch array big enough for
 * 'scratch_pages' pages of PTE values (new->memory).  On vmalloc
 * failure the key is released.  Caller owns the returned descriptor and
 * frees it via agp_free_memory().
 * NOTE(review): NULL checks / returns are in lines missing from this
 * extract.
 */
204 static agp_memory
*agp_create_memory(int scratch_pages
)
208 new = kmalloc(sizeof(agp_memory
), GFP_KERNEL
);
213 memset(new, 0, sizeof(agp_memory
));
214 new->key
= agp_get_key();
220 new->memory
= vmalloc(PAGE_SIZE
* scratch_pages
);
222 if (new->memory
== NULL
) {
223 agp_free_key(new->key
);
227 new->num_scratch_pages
= scratch_pages
;
/*
 * agp_free_memory() - tear down an agp_memory block: unbind it from the
 * GATT if still bound, delegate non-generic (chipset-specific) types to
 * the bridge's free_by_type hook, otherwise strip the low 12 mask bits
 * from each stored PTE to recover the physical address and destroy each
 * backing page, then release the key.
 * NOTE(review): returns, the vfree of new->memory and MOD_DEC are in
 * lines missing from this extract.
 */
231 void agp_free_memory(agp_memory
* curr
)
235 if ((agp_bridge
.type
== NOT_SUPPORTED
) || (curr
== NULL
)) {
238 if (curr
->is_bound
== TRUE
) {
239 agp_unbind_memory(curr
);
241 if (curr
->type
!= 0) {
242 agp_bridge
.free_by_type(curr
);
245 if (curr
->page_count
!= 0) {
246 for (i
= 0; i
< curr
->page_count
; i
++) {
/* Clear the page-offset/flag bits applied by mask_memory() to get
 * back the physical page address, then free the page itself. */
247 curr
->memory
[i
] &= ~(0x00000fff);
248 agp_destroy_page((unsigned long)
249 phys_to_virt(curr
->memory
[i
]));
252 agp_free_key(curr
->key
)
;
258 #define ENTRIES_PER_PAGE (PAGE_SIZE / sizeof(unsigned long))
/*
 * agp_allocate_memory() - public allocator for AGP memory.
 * Rejects calls when no bridge is present or when the request would
 * exceed max_memory_agp.  Non-zero 'type' requests are delegated to the
 * bridge's alloc_by_type hook; type 0 allocates a descriptor sized in
 * whole scratch pages (ENTRIES_PER_PAGE PTEs per page), then fills it
 * with freshly allocated pages, each translated through the bridge's
 * mask_memory() into a GATT-ready PTE value.  Partial failure frees the
 * whole structure.
 * NOTE(review): returns, NULL checks, MOD_INC_USE_COUNT and the final
 * flush/bookkeeping are in lines missing from this extract.
 */
260 agp_memory
*agp_allocate_memory(size_t page_count
, u32 type
)
266 if (agp_bridge
.type
== NOT_SUPPORTED
) {
269 if ((atomic_read(&agp_bridge
.current_memory_agp
) + page_count
) >
270 agp_bridge
.max_memory_agp
) {
275 new = agp_bridge
.alloc_by_type(page_count
, type
);
278 /* We always increase the module count, since free auto-decrements
284 scratch_pages
= (page_count
+ ENTRIES_PER_PAGE
- 1) / ENTRIES_PER_PAGE
;
286 new = agp_create_memory(scratch_pages
);
292 for (i
= 0; i
< page_count
; i
++) {
293 new->memory
[i
] = agp_alloc_page();
295 if (new->memory
[i
] == 0) {
296 /* Free this structure */
297 agp_free_memory(new);
/* Store the PTE-encoded (masked) physical address of the page. */
301 agp_bridge
.mask_memory(
302 virt_to_phys((void *) new->memory
[i
]),
310 /* End - Generic routines for handling agp_memory structures */
/*
 * agp_return_size() - return the aperture size (in MB, per the size
 * tables) of the currently selected aperture, decoding
 * agp_bridge.current_size according to the bridge's size_type
 * (8/16/32-bit, two-level, or fixed descriptors).
 * NOTE(review): the case labels, breaks and final return are in lines
 * missing from this extract.
 */
312 static int agp_return_size(void)
317 temp
= agp_bridge
.current_size
;
319 switch (agp_bridge
.size_type
) {
321 current_size
= A_SIZE_8(temp
)->size
;
324 current_size
= A_SIZE_16(temp
)->size
;
327 current_size
= A_SIZE_32(temp
)->size
;
330 current_size
= A_SIZE_LVL2(temp
)->size
;
332 case FIXED_APER_SIZE
:
333 current_size
= A_SIZE_FIX(temp
)->size
;
343 /* Routine to copy over information structure */
/*
 * agp_copy_info() - fill a caller-supplied agp_kern_info snapshot of the
 * bridge state: backend version, PCI device, chipset type, negotiated
 * mode, aperture base/size, and memory accounting.  When no supported
 * bridge exists only the chipset field is set (to NOT_SUPPORTED) before
 * an early return (the return itself is in a missing line).
 */
345 void agp_copy_info(agp_kern_info
* info
)
347 memset(info
, 0, sizeof(agp_kern_info
));
348 if (agp_bridge
.type
== NOT_SUPPORTED
) {
349 info
->chipset
= agp_bridge
.type
;
352 info
->version
.major
= agp_bridge
.version
->major
;
353 info
->version
.minor
= agp_bridge
.version
->minor
;
354 info
->device
= agp_bridge
.dev
;
355 info
->chipset
= agp_bridge
.type
;
356 info
->mode
= agp_bridge
.mode
;
357 info
->aper_base
= agp_bridge
.gart_bus_addr
;
358 info
->aper_size
= agp_return_size();
359 info
->max_memory
= agp_bridge
.max_memory_agp
;
360 info
->current_memory
= atomic_read(&agp_bridge
.current_memory_agp
);
363 /* End - Routine to copy over information structure */
366 * Routines for handling swapping of agp_memory into the GATT -
367 * These routines take agp_memory and insert them into the GATT.
368 * They call device specific routines to actually write to the GATT.
/*
 * agp_bind_memory() - map an agp_memory block into the GATT starting at
 * page offset 'pg_start'.  Rejects missing bridge, NULL block, or an
 * already-bound block.  Flushes CPU caches once per block (is_flushed
 * latch; the cache_flush call itself is in a missing line), delegates to
 * the bridge's insert_memory hook, and on success records the bound
 * state and start offset.
 * NOTE(review): error returns and the ret_val check are in lines missing
 * from this extract.
 */
371 int agp_bind_memory(agp_memory
* curr
, off_t pg_start
)
375 if ((agp_bridge
.type
== NOT_SUPPORTED
) ||
376 (curr
== NULL
) || (curr
->is_bound
== TRUE
)) {
379 if (curr
->is_flushed
== FALSE
) {
381 curr
->is_flushed
= TRUE
;
383 ret_val
= agp_bridge
.insert_memory(curr
, pg_start
, curr
->type
);
388 curr
->is_bound
= TRUE
;
389 curr
->pg_start
= pg_start
;
/*
 * agp_unbind_memory() - remove a bound agp_memory block from the GATT.
 * Rejects missing bridge, NULL block, or a block that is not bound;
 * delegates to the bridge's remove_memory hook using the recorded
 * pg_start, and on success clears the bound flag.
 * NOTE(review): error returns and the ret_val check are in lines
 * missing from this extract.
 */
393 int agp_unbind_memory(agp_memory
* curr
)
397 if ((agp_bridge
.type
== NOT_SUPPORTED
) || (curr
== NULL
)) {
400 if (curr
->is_bound
!= TRUE
) {
403 ret_val
= agp_bridge
.remove_memory(curr
, curr
->pg_start
, curr
->type
);
408 curr
->is_bound
= FALSE
;
413 /* End - Routines for handling swapping of agp_memory into the GATT */
416 * Driver routines - start
417 * Currently this module supports the following chipsets:
418 * i810, 440lx, 440bx, 440gx, i840, i850, via vp3, via mvp3, via kx133,
419 * via kt133, amd irongate, ALi M1541, and generic support for the SiS
423 /* Generic Agp routines - Start */
/*
 * agp_generic_agp_enable() - negotiate and program the AGP command
 * register for the bridge and every AGP-capable VGA device.
 *
 * Three passes over the PCI bus:
 *   PASS1: for each VGA-class device with the PCI capabilities bit set,
 *          walk the capability list (0x34 pointer, ID 0x02 = AGP) and
 *          AND the device's AGP status into the working 'command' value
 *          (minimum RQ depth; clear SBA/FW/4X/2X/1X bits any party
 *          lacks).
 *   PASS2: pick the highest common rate, set the AGP_ENABLE bit
 *          (0x100), and write the result to the bridge's AGP command
 *          register (capndx + 8).
 *   PASS3: write the same command value to every AGP device found.
 *
 * NOTE(review): loop bodies, 'continue' statements, and several
 * read/write argument lines are missing from this extract (original
 * line-number gaps), so the control flow shown here is partial.
 */
425 static void agp_generic_agp_enable(u32 mode
)
427 struct pci_dev
*device
= NULL
;
428 u32 command
, scratch
, cap_id
;
431 pci_read_config_dword(agp_bridge
.dev
,
432 agp_bridge
.capndx
+ 4,
436 * PASS1: go throu all devices that claim to be
437 * AGP devices and collect their data.
440 while ((device
= pci_find_class(PCI_CLASS_DISPLAY_VGA
<< 8,
442 pci_read_config_dword(device
, 0x04, &scratch
);
444 if (!(scratch
& 0x00100000))
447 pci_read_config_byte(device
, 0x34, &cap_ptr
);
449 if (cap_ptr
!= 0x00) {
451 pci_read_config_dword(device
,
454 if ((cap_id
& 0xff) != 0x02)
455 cap_ptr
= (cap_id
>> 8) & 0xff;
457 while (((cap_id
& 0xff) != 0x02) && (cap_ptr
!= 0x00));
459 if (cap_ptr
!= 0x00) {
461 * Ok, here we have a AGP device. Disable impossible
462 * settings, and adjust the readqueue to the minimum.
465 pci_read_config_dword(device
, cap_ptr
+ 4, &scratch
);
467 /* adjust RQ depth */
469 ((command
& ~0xff000000) |
470 min((mode
& 0xff000000),
471 min((command
& 0xff000000),
472 (scratch
& 0xff000000))));
474 /* disable SBA if it's not supported */
475 if (!((command
& 0x00000200) &&
476 (scratch
& 0x00000200) &&
477 (mode
& 0x00000200)))
478 command
&= ~0x00000200;
480 /* disable FW if it's not supported */
481 if (!((command
& 0x00000010) &&
482 (scratch
& 0x00000010) &&
483 (mode
& 0x00000010)))
484 command
&= ~0x00000010;
486 if (!((command
& 4) &&
489 command
&= ~0x00000004;
491 if (!((command
& 2) &&
494 command
&= ~0x00000002;
496 if (!((command
& 1) &&
499 command
&= ~0x00000001;
503 * PASS2: Figure out the 4X/2X/1X setting and enable the
504 * target (our motherboard chipset).
508 command
&= ~3; /* 4X */
511 command
&= ~5; /* 2X */
514 command
&= ~6; /* 1X */
516 command
|= 0x00000100;
518 pci_write_config_dword(agp_bridge
.dev
,
519 agp_bridge
.capndx
+ 8,
523 * PASS3: Go throu all AGP devices and update the
527 while ((device
= pci_find_class(PCI_CLASS_DISPLAY_VGA
<< 8,
529 pci_read_config_dword(device
, 0x04, &scratch
);
531 if (!(scratch
& 0x00100000))
534 pci_read_config_byte(device
, 0x34, &cap_ptr
);
536 if (cap_ptr
!= 0x00) {
538 pci_read_config_dword(device
,
541 if ((cap_id
& 0xff) != 0x02)
542 cap_ptr
= (cap_id
>> 8) & 0xff;
544 while (((cap_id
& 0xff) != 0x02) && (cap_ptr
!= 0x00));
547 pci_write_config_dword(device
, cap_ptr
+ 8, command
);
/*
 * agp_generic_create_gatt_table() - allocate and map the (single-level)
 * GATT.  Refuses two-level (LVL2) configurations.  For variable-size
 * apertures it decodes size/page_order/num_entries from the current
 * aperture descriptor, allocates the table with __get_free_pages(),
 * and - on failure - retries at successively smaller aperture sizes
 * (the do/while over aperture_size_idx).  The table's pages are marked
 * PG_reserved, the table is remapped uncached (ioremap_nocache) for the
 * CPU, its bus address recorded, and every entry initialized to the
 * scratch page.
 * NOTE(review): many braces, case labels, 'break's and error returns
 * are in lines missing from this extract.
 */
551 static int agp_generic_create_gatt_table(void)
562 /* The generic routines can't handle 2 level gatt's */
563 if (agp_bridge
.size_type
== LVL2_APER_SIZE
) {
568 i
= agp_bridge
.aperture_size_idx
;
569 temp
= agp_bridge
.current_size
;
570 size
= page_order
= num_entries
= 0;
572 if (agp_bridge
.size_type
!= FIXED_APER_SIZE
) {
/* Variable aperture: decode the current size descriptor. */
574 switch (agp_bridge
.size_type
) {
576 size
= A_SIZE_8(temp
)->size
;
578 A_SIZE_8(temp
)->page_order
;
580 A_SIZE_8(temp
)->num_entries
;
583 size
= A_SIZE_16(temp
)->size
;
584 page_order
= A_SIZE_16(temp
)->page_order
;
585 num_entries
= A_SIZE_16(temp
)->num_entries
;
588 size
= A_SIZE_32(temp
)->size
;
589 page_order
= A_SIZE_32(temp
)->page_order
;
590 num_entries
= A_SIZE_32(temp
)->num_entries
;
592 /* This case will never really happen. */
593 case FIXED_APER_SIZE
:
596 size
= page_order
= num_entries
= 0;
600 table
= (char *) __get_free_pages(GFP_KERNEL
,
/* Allocation failed: step down to the next smaller aperture size. */
605 switch (agp_bridge
.size_type
) {
607 agp_bridge
.current_size
= A_IDX8();
610 agp_bridge
.current_size
= A_IDX16();
613 agp_bridge
.current_size
= A_IDX32();
615 /* This case will never really
618 case FIXED_APER_SIZE
:
621 agp_bridge
.current_size
=
622 agp_bridge
.current_size
;
626 agp_bridge
.aperture_size_idx
= i
;
628 } while ((table
== NULL
) &&
629 (i
< agp_bridge
.num_aperture_sizes
));
/* Fixed aperture: single descriptor, single allocation attempt. */
631 size
= ((aper_size_info_fixed
*) temp
)->size
;
632 page_order
= ((aper_size_info_fixed
*) temp
)->page_order
;
633 num_entries
= ((aper_size_info_fixed
*) temp
)->num_entries
;
634 table
= (char *) __get_free_pages(GFP_KERNEL
, page_order
);
640 table_end
= table
+ ((PAGE_SIZE
* (1 << page_order
)) - 1);
/* Reserve the table's pages so the VM never touches them. */
642 for (page
= virt_to_page(table
); page
<= virt_to_page(table_end
); page
++)
643 set_bit(PG_reserved
, &page
->flags
);
645 agp_bridge
.gatt_table_real
= (unsigned long *) table
;
/* Map the table uncached: the chipset reads it via DMA. */
647 agp_bridge
.gatt_table
= ioremap_nocache(virt_to_phys(table
),
648 (PAGE_SIZE
* (1 << page_order
)));
651 if (agp_bridge
.gatt_table
== NULL
) {
652 for (page
= virt_to_page(table
); page
<= virt_to_page(table_end
); page
++)
653 clear_bit(PG_reserved
, &page
->flags
);
655 free_pages((unsigned long) table
, page_order
);
659 agp_bridge
.gatt_bus_addr
= virt_to_phys(agp_bridge
.gatt_table_real
);
/* Point every GATT entry at the scratch page until real pages bind. */
661 for (i
= 0; i
< num_entries
; i
++) {
662 agp_bridge
.gatt_table
[i
] =
663 (unsigned long) agp_bridge
.scratch_page
;
/*
 * agp_generic_free_gatt_table() - release the GATT created by
 * agp_generic_create_gatt_table(): decode the page order from the
 * current aperture descriptor, unmap the uncached mapping, clear
 * PG_reserved on the table's pages, and free them.  Individual AGP
 * memory blocks are assumed already freed by this point (see original
 * comment).
 */
669 static int agp_generic_free_gatt_table(void)
672 char *table
, *table_end
;
676 temp
= agp_bridge
.current_size
;
678 switch (agp_bridge
.size_type
) {
680 page_order
= A_SIZE_8(temp
)->page_order
;
683 page_order
= A_SIZE_16(temp
)->page_order
;
686 page_order
= A_SIZE_32(temp
)->page_order
;
688 case FIXED_APER_SIZE
:
689 page_order
= A_SIZE_FIX(temp
)->page_order
;
692 /* The generic routines can't deal with 2 level gatt's */
700 /* Do not worry about freeing memory, because if this is
701 * called, then all agp memory is deallocated and removed
705 iounmap(agp_bridge
.gatt_table
);
706 table
= (char *) agp_bridge
.gatt_table_real
;
707 table_end
= table
+ ((PAGE_SIZE
* (1 << page_order
)) - 1);
709 for (page
= virt_to_page(table
); page
<= virt_to_page(table_end
); page
++)
710 clear_bit(PG_reserved
, &page
->flags
);
712 free_pages((unsigned long) agp_bridge
.gatt_table_real
, page_order
);
/*
 * agp_generic_insert_memory() - bridge insert_memory hook for chipsets
 * with a flat GATT.  Decodes num_entries from the current aperture
 * descriptor, rejects non-zero memory types (generic code knows none),
 * bounds-checks pg_start + page_count against the table, verifies the
 * target range is empty (PGE_EMPTY), flushes caches once per block,
 * copies the block's pre-masked PTEs into the GATT, and TLB-flushes.
 * NOTE(review): case labels/breaks, error returns and the j = pg_start
 * initialization are in lines missing from this extract.
 */
716 static int agp_generic_insert_memory(agp_memory
* mem
,
717 off_t pg_start
, int type
)
719 int i
, j
, num_entries
;
722 temp
= agp_bridge
.current_size
;
724 switch (agp_bridge
.size_type
) {
726 num_entries
= A_SIZE_8(temp
)->num_entries
;
729 num_entries
= A_SIZE_16(temp
)->num_entries
;
732 num_entries
= A_SIZE_32(temp
)->num_entries
;
734 case FIXED_APER_SIZE
:
735 num_entries
= A_SIZE_FIX(temp
)->num_entries
;
738 /* The generic routines can't deal with 2 level gatt's */
746 if (type
!= 0 || mem
->type
!= 0) {
747 /* The generic routines know nothing of memory types */
750 if ((pg_start
+ mem
->page_count
) > num_entries
) {
/* Refuse to overwrite entries that are already in use. */
755 while (j
< (pg_start
+ mem
->page_count
)) {
756 if (!PGE_EMPTY(agp_bridge
.gatt_table
[j
])) {
762 if (mem
->is_flushed
== FALSE
) {
764 mem
->is_flushed
= TRUE
;
766 for (i
= 0, j
= pg_start
; i
< mem
->page_count
; i
++, j
++) {
767 agp_bridge
.gatt_table
[j
] = mem
->memory
[i
];
770 agp_bridge
.tlb_flush(mem
);
/*
 * agp_generic_remove_memory() - bridge remove_memory hook for flat-GATT
 * chipsets: rejects non-zero memory types, rewrites the block's GATT
 * range back to the scratch page, and TLB-flushes.
 * NOTE(review): the trailing 'int type' parameter line and the return
 * statements are in lines missing from this extract.
 */
774 static int agp_generic_remove_memory(agp_memory
* mem
, off_t pg_start
,
779 if (type
!= 0 || mem
->type
!= 0) {
780 /* The generic routines know nothing of memory types */
783 for (i
= pg_start
; i
< (mem
->page_count
+ pg_start
); i
++) {
784 agp_bridge
.gatt_table
[i
] =
785 (unsigned long) agp_bridge
.scratch_page
;
788 agp_bridge
.tlb_flush(mem
);
/*
 * agp_generic_alloc_by_type() - generic alloc_by_type hook.  Only the
 * signature survives in this extract; the body (which rejects all
 * special types for generic chipsets - presumably returning NULL) is in
 * missing lines.  NOTE(review): confirm against upstream.
 */
792 static agp_memory
*agp_generic_alloc_by_type(size_t page_count
, int type
)
/*
 * agp_generic_free_by_type() - generic free_by_type hook: release the
 * PTE scratch array (the vfree call is in a missing line inside the if)
 * and return the block's key to the bitmap.  Freeing of the descriptor
 * itself is also in missing lines.
 */
797 static void agp_generic_free_by_type(agp_memory
* curr
)
799 if (curr
->memory
!= NULL
) {
802 agp_free_key(curr
->key
);
/*
 * agp_enable() - public entry point that programs the requested AGP
 * mode: no-op without a supported bridge, otherwise dispatch to the
 * chipset's agp_enable hook.
 */
806 void agp_enable(u32 mode
)
808 if (agp_bridge
.type
== NOT_SUPPORTED
) return;
809 agp_bridge
.agp_enable(mode
);
812 /* End - Generic Agp routines */
814 #ifdef CONFIG_AGP_I810
/*
 * i810 driver data: fixed aperture size table, the two i810-specific
 * memory types (on-chip dcache and single-page physical allocations for
 * the hardware cursor), the PTE mask table, and the driver-private
 * state (graphics device, ioremapped MMIO registers, dcache entry
 * count).  Table initializers are partly in missing lines.
 */
815 static aper_size_info_fixed intel_i810_sizes
[] =
818 /* The 32M mode still requires a 64k gatt */
822 #define AGP_DCACHE_MEMORY 1
823 #define AGP_PHYS_MEMORY 2
825 static gatt_mask intel_i810_masks
[] =
828 {(I810_PTE_VALID
| I810_PTE_LOCAL
), AGP_DCACHE_MEMORY
},
832 static struct _intel_i810_private
{
833 struct pci_dev
*i810_dev
; /* device one */
834 volatile u8
*registers
;
835 int num_dcache_entries
;
836 } intel_i810_private
;
/*
 * intel_i810_fetch_size() - read the i810 SMRAM/MISCC register to find
 * whether graphics memory is disabled (warn and fail) and whether the
 * graphics memory window is 32M (index 1 in the fixed size table) or
 * the smaller default (index 0).  Sets previous/current size and the
 * aperture index, returning the chosen size.
 */
838 static int intel_i810_fetch_size(void)
841 aper_size_info_fixed
*values
;
843 pci_read_config_dword(agp_bridge
.dev
, I810_SMRAM_MISCC
, &smram_miscc
);
844 values
= A_SIZE_FIX(agp_bridge
.aperture_sizes
);
846 if ((smram_miscc
& I810_GMS
) == I810_GMS_DISABLE
) {
847 printk(KERN_WARNING PFX
"i810 is disabled\n");
850 if ((smram_miscc
& I810_GFX_MEM_WIN_SIZE
) == I810_GFX_MEM_WIN_32M
) {
851 agp_bridge
.previous_size
=
852 agp_bridge
.current_size
= (void *) (values
+ 1);
853 agp_bridge
.aperture_size_idx
= 1;
854 return values
[1].size
;
856 agp_bridge
.previous_size
=
857 agp_bridge
.current_size
= (void *) (values
);
858 agp_bridge
.aperture_size_idx
= 0;
859 return values
[0].size
;
/*
 * intel_i810_configure() - program the i810 GMCH: ioremap 512KB of MMIO
 * registers from the MMADDR BAR, detect 4MB dedicated (dcache) video
 * RAM via the DRAM control register, record the aperture bus address
 * from GMADDR, enable the GATT by writing its bus address (with the
 * enable bit) to PGETBL_CTL, and - when a scratch page is in use -
 * point every PTE at it.
 * NOTE(review): CTRL_REG flush and return statements are in lines
 * missing from this extract.
 */
865 static int intel_i810_configure(void)
867 aper_size_info_fixed
*current_size
;
871 current_size
= A_SIZE_FIX(agp_bridge
.current_size
);
873 pci_read_config_dword(intel_i810_private
.i810_dev
, I810_MMADDR
, &temp
);
876 intel_i810_private
.registers
=
877 (volatile u8
*) ioremap(temp
, 128 * 4096);
879 if ((INREG32(intel_i810_private
.registers
, I810_DRAM_CTL
)
880 & I810_DRAM_ROW_0
) == I810_DRAM_ROW_0_SDRAM
) {
881 /* This will need to be dynamically assigned */
882 printk(KERN_INFO PFX
"detected 4MB dedicated video ram.\n");
883 intel_i810_private
.num_dcache_entries
= 1024;
885 pci_read_config_dword(intel_i810_private
.i810_dev
, I810_GMADDR
, &temp
);
886 agp_bridge
.gart_bus_addr
= (temp
& PCI_BASE_ADDRESS_MEM_MASK
);
887 OUTREG32(intel_i810_private
.registers
, I810_PGETBL_CTL
,
888 agp_bridge
.gatt_bus_addr
| I810_PGETBL_ENABLED
);
891 if (agp_bridge
.needs_scratch_page
== TRUE
) {
892 for (i
= 0; i
< current_size
->num_entries
; i
++) {
893 OUTREG32(intel_i810_private
.registers
,
894 I810_PTE_BASE
+ (i
* 4),
895 agp_bridge
.scratch_page
);
/*
 * intel_i810_cleanup() - disable the GATT (clear PGETBL_CTL) and unmap
 * the i810 MMIO registers.
 */
901 static void intel_i810_cleanup(void)
903 OUTREG32(intel_i810_private
.registers
, I810_PGETBL_CTL
, 0);
904 iounmap((void *) intel_i810_private
.registers
);
/*
 * intel_i810_tlbflush() - tlb_flush hook; the body (empty on i810 - no
 * explicit TLB flush needed - presumably just 'return') is in missing
 * lines.  NOTE(review): confirm against upstream.
 */
907 static void intel_i810_tlbflush(agp_memory
* mem
)
/*
 * intel_i810_agp_enable() - agp_enable hook; body (a no-op on the
 * integrated i810 - presumably just 'return') is in missing lines.
 * NOTE(review): confirm against upstream.
 */
912 static void intel_i810_agp_enable(u32 mode
)
/*
 * intel_i810_insert_entries() - insert_memory hook for the i810.
 * Bounds-checks against the fixed GATT and refuses occupied entries.
 * AGP_DCACHE_MEMORY blocks are mapped by writing local-memory PTEs
 * (page index * 4096 | I810_PTE_LOCAL | ...) directly to the PTE MMIO
 * window; all other types (including AGP_PHYS_MEMORY) write the block's
 * pre-masked PTE values.  TLB-flushes after each path.
 * NOTE(review): the 'int type' parameter line, error returns and some
 * loop headers are in lines missing from this extract.
 */
917 static int intel_i810_insert_entries(agp_memory
* mem
, off_t pg_start
,
920 int i
, j
, num_entries
;
923 temp
= agp_bridge
.current_size
;
924 num_entries
= A_SIZE_FIX(temp
)->num_entries
;
926 if ((pg_start
+ mem
->page_count
) > num_entries
) {
929 for (j
= pg_start
; j
< (pg_start
+ mem
->page_count
); j
++) {
930 if (!PGE_EMPTY(agp_bridge
.gatt_table
[j
])) {
935 if (type
!= 0 || mem
->type
!= 0) {
936 if ((type
== AGP_DCACHE_MEMORY
) &&
937 (mem
->type
== AGP_DCACHE_MEMORY
)) {
/* Map the on-chip dcache: PTEs built from the entry index. */
941 i
< (pg_start
+ mem
->page_count
); i
++) {
942 OUTREG32(intel_i810_private
.registers
,
943 I810_PTE_BASE
+ (i
* 4),
944 (i
* 4096) | I810_PTE_LOCAL
|
948 agp_bridge
.tlb_flush(mem
);
951 if((type
== AGP_PHYS_MEMORY
) &&
952 (mem
->type
== AGP_PHYS_MEMORY
)) {
/* Normal path: write the block's pre-masked PTEs into the window. */
960 for (i
= 0, j
= pg_start
; i
< mem
->page_count
; i
++, j
++) {
961 OUTREG32(intel_i810_private
.registers
,
962 I810_PTE_BASE
+ (j
* 4), mem
->memory
[i
]);
966 agp_bridge
.tlb_flush(mem
);
/*
 * intel_i810_remove_entries() - remove_memory hook for the i810:
 * rewrite the block's PTE range back to the scratch page via the MMIO
 * PTE window, then TLB-flush.  The 'int type' parameter line and return
 * are in missing lines.
 */
970 static int intel_i810_remove_entries(agp_memory
* mem
, off_t pg_start
,
975 for (i
= pg_start
; i
< (mem
->page_count
+ pg_start
); i
++) {
976 OUTREG32(intel_i810_private
.registers
,
977 I810_PTE_BASE
+ (i
* 4),
978 agp_bridge
.scratch_page
);
982 agp_bridge
.tlb_flush(mem
);
/*
 * intel_i810_alloc_by_type() - alloc_by_type hook for the i810's two
 * special memory types.
 * AGP_DCACHE_MEMORY: must request exactly the detected number of dcache
 * entries; a descriptor with no backing scratch pages is returned.
 * AGP_PHYS_MEMORY: exactly one page (the hardware cursor needs its
 * physical address); allocates one page, stores its masked PTE, and
 * records the raw physical address in new->physical.
 * NOTE(review): returns and some NULL checks are in lines missing from
 * this extract.
 */
986 static agp_memory
*intel_i810_alloc_by_type(size_t pg_count
, int type
)
990 if (type
== AGP_DCACHE_MEMORY
) {
991 if (pg_count
!= intel_i810_private
.num_dcache_entries
) {
994 new = agp_create_memory(1);
999 new->type
= AGP_DCACHE_MEMORY
;
1000 new->page_count
= pg_count
;
1001 new->num_scratch_pages
= 0;
1006 if(type
== AGP_PHYS_MEMORY
) {
1007 /* The I810 requires a physical address to program
1008 * it's mouse pointer into hardware. However the
1009 * Xserver still writes to it through the agp
1012 if (pg_count
!= 1) {
1015 new = agp_create_memory(1);
1021 new->memory
[0] = agp_alloc_page();
1023 if (new->memory
[0] == 0) {
1024 /* Free this structure */
1025 agp_free_memory(new);
1029 agp_bridge
.mask_memory(
1030 virt_to_phys((void *) new->memory
[0]),
1032 new->page_count
= 1;
1033 new->num_scratch_pages
= 1;
1034 new->type
= AGP_PHYS_MEMORY
;
1035 new->physical
= virt_to_phys((void *) new->memory
[0]);
/*
 * intel_i810_free_by_type() - free_by_type hook: release the key, and
 * for AGP_PHYS_MEMORY also destroy the single backing page and free the
 * PTE array.  Freeing the descriptor itself is in missing lines.
 */
1042 static void intel_i810_free_by_type(agp_memory
* curr
)
1044 agp_free_key(curr
->key
);
1045 if(curr
->type
== AGP_PHYS_MEMORY
) {
1046 agp_destroy_page((unsigned long)
1047 phys_to_virt(curr
->memory
[0]));
1048 vfree(curr
->memory
);
/*
 * intel_i810_mask_memory() - turn a physical page address into an i810
 * PTE by OR-ing in the mask for the given memory type (valid bit,
 * local-memory bit, etc.).
 */
1054 static unsigned long intel_i810_mask_memory(unsigned long addr
, int type
)
1056 /* Type checking must be done elsewhere */
1057 return addr
| agp_bridge
.masks
[type
].mask
;
/*
 * intel_i810_setup() - populate the global agp_bridge with the i810
 * driver's tables and method hooks.  Called at init with the i810
 * graphics PCI device; the final return is in missing lines.
 */
1060 static int __init
intel_i810_setup(struct pci_dev
*i810_dev
)
1062 intel_i810_private
.i810_dev
= i810_dev
;
1064 agp_bridge
.masks
= intel_i810_masks
;
1065 agp_bridge
.num_of_masks
= 2;
1066 agp_bridge
.aperture_sizes
= (void *) intel_i810_sizes
;
1067 agp_bridge
.size_type
= FIXED_APER_SIZE
;
1068 agp_bridge
.num_aperture_sizes
= 2;
1069 agp_bridge
.dev_private_data
= (void *) &intel_i810_private
;
1070 agp_bridge
.needs_scratch_page
= TRUE
;
1071 agp_bridge
.configure
= intel_i810_configure
;
1072 agp_bridge
.fetch_size
= intel_i810_fetch_size
;
1073 agp_bridge
.cleanup
= intel_i810_cleanup
;
1074 agp_bridge
.tlb_flush
= intel_i810_tlbflush
;
1075 agp_bridge
.mask_memory
= intel_i810_mask_memory
;
1076 agp_bridge
.agp_enable
= intel_i810_agp_enable
;
1077 agp_bridge
.cache_flush
= global_cache_flush
;
1078 agp_bridge
.create_gatt_table
= agp_generic_create_gatt_table
;
1079 agp_bridge
.free_gatt_table
= agp_generic_free_gatt_table
;
1080 agp_bridge
.insert_memory
= intel_i810_insert_entries
;
1081 agp_bridge
.remove_memory
= intel_i810_remove_entries
;
1082 agp_bridge
.alloc_by_type
= intel_i810_alloc_by_type
;
1083 agp_bridge
.free_by_type
= intel_i810_free_by_type
;
1088 #endif /* CONFIG_AGP_I810 */
1090 #ifdef CONFIG_AGP_INTEL
/*
 * intel_fetch_size() - read the Intel 440-series APSIZE register and
 * match it against the 16-bit aperture size table; on a match, record
 * previous/current size and the index and return the size in MB.  The
 * no-match return is in missing lines.
 */
1092 static int intel_fetch_size(void)
1096 aper_size_info_16
*values
;
1098 pci_read_config_word(agp_bridge
.dev
, INTEL_APSIZE
, &temp
);
1099 values
= A_SIZE_16(agp_bridge
.aperture_sizes
);
1101 for (i
= 0; i
< agp_bridge
.num_aperture_sizes
; i
++) {
1102 if (temp
== values
[i
].size_value
) {
1103 agp_bridge
.previous_size
=
1104 agp_bridge
.current_size
= (void *) (values
+ i
);
1105 agp_bridge
.aperture_size_idx
= i
;
1106 return values
[i
].size
;
/*
 * intel_tlbflush() - flush the chipset GART TLB by pulsing the AGPCTRL
 * register (0x2200 then back to 0x2280).
 */
1113 static void intel_tlbflush(agp_memory
* mem
)
1115 pci_write_config_dword(agp_bridge
.dev
, INTEL_AGPCTRL
, 0x2200);
1116 pci_write_config_dword(agp_bridge
.dev
, INTEL_AGPCTRL
, 0x2280);
/*
 * intel_cleanup() - restore the chipset to its pre-driver state: clear
 * the GATT-enable bit (bit 9) in NBXCFG and write back the previous
 * aperture size value.
 */
1119 static void intel_cleanup(void)
1122 aper_size_info_16
*previous_size
;
1124 previous_size
= A_SIZE_16(agp_bridge
.previous_size
);
1125 pci_read_config_word(agp_bridge
.dev
, INTEL_NBXCFG
, &temp
);
1126 pci_write_config_word(agp_bridge
.dev
, INTEL_NBXCFG
, temp
& ~(1 << 9));
1127 pci_write_config_word(agp_bridge
.dev
, INTEL_APSIZE
,
1128 previous_size
->size_value
);
/*
 * intel_configure() - program a 440LX/BX/GX-class bridge: write the
 * aperture size, record the aperture bus address from APBASE, point
 * ATTBASE at the GATT, initialize AGPCTRL, enable GATT/paging via
 * NBXCFG (set bit 9, clear bit 10), and clear latched error status.
 * The return is in missing lines.
 */
1131 static int intel_configure(void)
1135 aper_size_info_16
*current_size
;
1137 current_size
= A_SIZE_16(agp_bridge
.current_size
);
1140 pci_write_config_word(agp_bridge
.dev
, INTEL_APSIZE
,
1141 current_size
->size_value
);
1143 /* address to map to */
1144 pci_read_config_dword(agp_bridge
.dev
, INTEL_APBASE
, &temp
);
1145 agp_bridge
.gart_bus_addr
= (temp
& PCI_BASE_ADDRESS_MEM_MASK
);
1147 /* attbase - aperture base */
1148 pci_write_config_dword(agp_bridge
.dev
, INTEL_ATTBASE
,
1149 agp_bridge
.gatt_bus_addr
);
1152 pci_write_config_dword(agp_bridge
.dev
, INTEL_AGPCTRL
, 0x2280);
1155 pci_read_config_word(agp_bridge
.dev
, INTEL_NBXCFG
, &temp2
);
1156 pci_write_config_word(agp_bridge
.dev
, INTEL_NBXCFG
,
1157 (temp2
& ~(1 << 10)) | (1 << 9));
1158 /* clear any possible error conditions */
1159 pci_write_config_byte(agp_bridge
.dev
, INTEL_ERRSTS
+ 1, 7);
/*
 * intel_840_configure() - i840 variant of intel_configure(): byte-wide
 * APSIZE write, AGPCTRL initialized to 0, AGP enabled through the i840
 * MCHCFG register, and i840-specific error status cleared.  The MCHCFG
 * value written and the return are in missing lines.
 */
1163 static int intel_840_configure(void)
1167 aper_size_info_16
*current_size
;
1169 current_size
= A_SIZE_16(agp_bridge
.current_size
);
1172 pci_write_config_byte(agp_bridge
.dev
, INTEL_APSIZE
,
1173 (char)current_size
->size_value
);
1175 /* address to map to */
1176 pci_read_config_dword(agp_bridge
.dev
, INTEL_APBASE
, &temp
);
1177 agp_bridge
.gart_bus_addr
= (temp
& PCI_BASE_ADDRESS_MEM_MASK
);
1179 /* attbase - aperture base */
1180 pci_write_config_dword(agp_bridge
.dev
, INTEL_ATTBASE
,
1181 agp_bridge
.gatt_bus_addr
);
1184 pci_write_config_dword(agp_bridge
.dev
, INTEL_AGPCTRL
, 0x0000);
1187 pci_read_config_word(agp_bridge
.dev
, INTEL_I840_MCHCFG
, &temp2
);
1188 pci_write_config_word(agp_bridge
.dev
, INTEL_I840_MCHCFG
,
1190 /* clear any possible error conditions */
1191 pci_write_config_word(agp_bridge
.dev
, INTEL_I840_ERRSTS
, 0xc000);
/*
 * intel_850_configure() - i850 variant of intel_configure(): identical
 * structure to the i840 path but using the i850 MCHCFG and error-status
 * registers.  The MCHCFG value written and the return are in missing
 * lines.
 */
1195 static int intel_850_configure(void)
1199 aper_size_info_16
*current_size
;
1201 current_size
= A_SIZE_16(agp_bridge
.current_size
);
1204 pci_write_config_byte(agp_bridge
.dev
, INTEL_APSIZE
,
1205 (char)current_size
->size_value
);
1207 /* address to map to */
1208 pci_read_config_dword(agp_bridge
.dev
, INTEL_APBASE
, &temp
);
1209 agp_bridge
.gart_bus_addr
= (temp
& PCI_BASE_ADDRESS_MEM_MASK
);
1211 /* attbase - aperture base */
1212 pci_write_config_dword(agp_bridge
.dev
, INTEL_ATTBASE
,
1213 agp_bridge
.gatt_bus_addr
);
1216 pci_write_config_dword(agp_bridge
.dev
, INTEL_AGPCTRL
, 0x0000);
1219 pci_read_config_word(agp_bridge
.dev
, INTEL_I850_MCHCFG
, &temp2
);
1220 pci_write_config_word(agp_bridge
.dev
, INTEL_I850_MCHCFG
,
1222 /* clear any possible AGP-related error conditions */
1223 pci_write_config_word(agp_bridge
.dev
, INTEL_I850_ERRSTS
, 0x001c);
/*
 * intel_mask_memory() - build a GATT PTE from a physical address using
 * the single Intel mask (type argument unused).
 */
1227 static unsigned long intel_mask_memory(unsigned long addr
, int type
)
1229 /* Memory type is ignored */
1231 return addr
| agp_bridge
.masks
[0].mask
;
1235 /* Setup function */
/*
 * Intel generic driver tables: the single PTE mask and the 7-entry
 * 16-bit aperture size table (size MB, num entries, page order,
 * APSIZE register value).  Most initializer rows are in missing lines.
 */
1236 static gatt_mask intel_generic_masks
[] =
1241 static aper_size_info_16 intel_generic_sizes
[7] =
1244 {128, 32768, 5, 32},
/*
 * intel_generic_setup() - populate agp_bridge with the generic Intel
 * 440-series tables and hooks.  The pdev argument is unused; the return
 * is in missing lines.
 */
1252 static int __init
intel_generic_setup (struct pci_dev
*pdev
)
1254 agp_bridge
.masks
= intel_generic_masks
;
1255 agp_bridge
.num_of_masks
= 1;
1256 agp_bridge
.aperture_sizes
= (void *) intel_generic_sizes
;
1257 agp_bridge
.size_type
= U16_APER_SIZE
;
1258 agp_bridge
.num_aperture_sizes
= 7;
1259 agp_bridge
.dev_private_data
= NULL
;
1260 agp_bridge
.needs_scratch_page
= FALSE
;
1261 agp_bridge
.configure
= intel_configure
;
1262 agp_bridge
.fetch_size
= intel_fetch_size
;
1263 agp_bridge
.cleanup
= intel_cleanup
;
1264 agp_bridge
.tlb_flush
= intel_tlbflush
;
1265 agp_bridge
.mask_memory
= intel_mask_memory
;
1266 agp_bridge
.agp_enable
= agp_generic_agp_enable
;
1267 agp_bridge
.cache_flush
= global_cache_flush
;
1268 agp_bridge
.create_gatt_table
= agp_generic_create_gatt_table
;
1269 agp_bridge
.free_gatt_table
= agp_generic_free_gatt_table
;
1270 agp_bridge
.insert_memory
= agp_generic_insert_memory
;
1271 agp_bridge
.remove_memory
= agp_generic_remove_memory
;
1272 agp_bridge
.alloc_by_type
= agp_generic_alloc_by_type
;
1273 agp_bridge
.free_by_type
= agp_generic_free_by_type
;
1277 (void) pdev
; /* unused */
/*
 * intel_840_setup() - same wiring as intel_generic_setup() but using
 * the i840-specific configure hook.  The return is in missing lines.
 */
1280 static int __init
intel_840_setup (struct pci_dev
*pdev
)
1282 agp_bridge
.masks
= intel_generic_masks
;
1283 agp_bridge
.num_of_masks
= 1;
1284 agp_bridge
.aperture_sizes
= (void *) intel_generic_sizes
;
1285 agp_bridge
.size_type
= U16_APER_SIZE
;
1286 agp_bridge
.num_aperture_sizes
= 7;
1287 agp_bridge
.dev_private_data
= NULL
;
1288 agp_bridge
.needs_scratch_page
= FALSE
;
1289 agp_bridge
.configure
= intel_840_configure
;
1290 agp_bridge
.fetch_size
= intel_fetch_size
;
1291 agp_bridge
.cleanup
= intel_cleanup
;
1292 agp_bridge
.tlb_flush
= intel_tlbflush
;
1293 agp_bridge
.mask_memory
= intel_mask_memory
;
1294 agp_bridge
.agp_enable
= agp_generic_agp_enable
;
1295 agp_bridge
.cache_flush
= global_cache_flush
;
1296 agp_bridge
.create_gatt_table
= agp_generic_create_gatt_table
;
1297 agp_bridge
.free_gatt_table
= agp_generic_free_gatt_table
;
1298 agp_bridge
.insert_memory
= agp_generic_insert_memory
;
1299 agp_bridge
.remove_memory
= agp_generic_remove_memory
;
1300 agp_bridge
.alloc_by_type
= agp_generic_alloc_by_type
;
1301 agp_bridge
.free_by_type
= agp_generic_free_by_type
;
1305 (void) pdev
; /* unused */
/*
 * intel_850_setup() - same wiring as intel_generic_setup() but using
 * the i850-specific configure hook.  The return is in missing lines.
 */
1308 static int __init
intel_850_setup (struct pci_dev
*pdev
)
1310 agp_bridge
.masks
= intel_generic_masks
;
1311 agp_bridge
.num_of_masks
= 1;
1312 agp_bridge
.aperture_sizes
= (void *) intel_generic_sizes
;
1313 agp_bridge
.size_type
= U16_APER_SIZE
;
1314 agp_bridge
.num_aperture_sizes
= 7;
1315 agp_bridge
.dev_private_data
= NULL
;
1316 agp_bridge
.needs_scratch_page
= FALSE
;
1317 agp_bridge
.configure
= intel_850_configure
;
1318 agp_bridge
.fetch_size
= intel_fetch_size
;
1319 agp_bridge
.cleanup
= intel_cleanup
;
1320 agp_bridge
.tlb_flush
= intel_tlbflush
;
1321 agp_bridge
.mask_memory
= intel_mask_memory
;
1322 agp_bridge
.agp_enable
= agp_generic_agp_enable
;
1323 agp_bridge
.cache_flush
= global_cache_flush
;
1324 agp_bridge
.create_gatt_table
= agp_generic_create_gatt_table
;
1325 agp_bridge
.free_gatt_table
= agp_generic_free_gatt_table
;
1326 agp_bridge
.insert_memory
= agp_generic_insert_memory
;
1327 agp_bridge
.remove_memory
= agp_generic_remove_memory
;
1328 agp_bridge
.alloc_by_type
= agp_generic_alloc_by_type
;
1329 agp_bridge
.free_by_type
= agp_generic_free_by_type
;
1333 (void) pdev
; /* unused */
1336 #endif /* CONFIG_AGP_INTEL */
1338 #ifdef CONFIG_AGP_VIA
/*
 * via_fetch_size() - read the VIA APSIZE byte register, match it in the
 * 8-bit aperture size table, record previous/current size and index,
 * and return the size in MB.  The no-match return is in missing lines.
 */
1340 static int via_fetch_size(void)
1344 aper_size_info_8
*values
;
1346 values
= A_SIZE_8(agp_bridge
.aperture_sizes
);
1347 pci_read_config_byte(agp_bridge
.dev
, VIA_APSIZE
, &temp
);
1348 for (i
= 0; i
< agp_bridge
.num_aperture_sizes
; i
++) {
1349 if (temp
== values
[i
].size_value
) {
1350 agp_bridge
.previous_size
=
1351 agp_bridge
.current_size
= (void *) (values
+ i
);
1352 agp_bridge
.aperture_size_idx
= i
;
1353 return values
[i
].size
;
/*
 * via_configure() - program a VIA bridge: write the aperture size byte,
 * record the aperture bus address from APBASE, enable the GART control
 * register, and point ATTBASE at the GATT (low bits 0x3 set, per VIA's
 * register format).  The return is in missing lines.
 */
1360 static int via_configure(void)
1363 aper_size_info_8
*current_size
;
1365 current_size
= A_SIZE_8(agp_bridge
.current_size
);
1367 pci_write_config_byte(agp_bridge
.dev
, VIA_APSIZE
,
1368 current_size
->size_value
);
1369 /* address to map too */
1370 pci_read_config_dword(agp_bridge
.dev
, VIA_APBASE
, &temp
);
1371 agp_bridge
.gart_bus_addr
= (temp
& PCI_BASE_ADDRESS_MEM_MASK
);
1373 /* GART control register */
1374 pci_write_config_dword(agp_bridge
.dev
, VIA_GARTCTRL
, 0x0000000f);
1376 /* attbase - aperture GATT base */
1377 pci_write_config_dword(agp_bridge
.dev
, VIA_ATTBASE
,
1378 (agp_bridge
.gatt_bus_addr
& 0xfffff000) | 3);
/*
 * via_cleanup() - restore the VIA bridge: clear ATTBASE and write back
 * the previous aperture size value.
 */
1382 static void via_cleanup(void)
1384 aper_size_info_8
*previous_size
;
1386 previous_size
= A_SIZE_8(agp_bridge
.previous_size
);
1387 pci_write_config_dword(agp_bridge
.dev
, VIA_ATTBASE
, 0);
1388 pci_write_config_byte(agp_bridge
.dev
, VIA_APSIZE
,
1389 previous_size
->size_value
);
/*
 * via_tlbflush() - flush the VIA GART TLB by pulsing bit 7 of the GART
 * control register.
 */
1392 static void via_tlbflush(agp_memory
* mem
)
1394 pci_write_config_dword(agp_bridge
.dev
, VIA_GARTCTRL
, 0x0000008f);
1395 pci_write_config_dword(agp_bridge
.dev
, VIA_GARTCTRL
, 0x0000000f);
/*
 * via_mask_memory() - build a GATT PTE from a physical address using
 * the single VIA mask (type argument unused).
 */
1398 static unsigned long via_mask_memory(unsigned long addr
, int type
)
1400 /* Memory type is ignored */
1402 return addr
| agp_bridge
.masks
[0].mask
;
/*
 * VIA driver tables: 7-entry 8-bit aperture size table (size MB, num
 * entries, page order, APSIZE register value) and the single PTE mask.
 * Most initializer rows are in missing lines.
 */
1405 static aper_size_info_8 via_generic_sizes
[7] =
1408 {128, 32768, 5, 128},
1409 {64, 16384, 4, 192},
1416 static gatt_mask via_generic_masks
[] =
/*
 * via_generic_setup() - populate agp_bridge with the VIA tables and
 * hooks.  The pdev argument is unused; the return is in missing lines.
 */
1421 static int __init
via_generic_setup (struct pci_dev
*pdev
)
1423 agp_bridge
.masks
= via_generic_masks
;
1424 agp_bridge
.num_of_masks
= 1;
1425 agp_bridge
.aperture_sizes
= (void *) via_generic_sizes
;
1426 agp_bridge
.size_type
= U8_APER_SIZE
;
1427 agp_bridge
.num_aperture_sizes
= 7;
1428 agp_bridge
.dev_private_data
= NULL
;
1429 agp_bridge
.needs_scratch_page
= FALSE
;
1430 agp_bridge
.configure
= via_configure
;
1431 agp_bridge
.fetch_size
= via_fetch_size
;
1432 agp_bridge
.cleanup
= via_cleanup
;
1433 agp_bridge
.tlb_flush
= via_tlbflush
;
1434 agp_bridge
.mask_memory
= via_mask_memory
;
1435 agp_bridge
.agp_enable
= agp_generic_agp_enable
;
1436 agp_bridge
.cache_flush
= global_cache_flush
;
1437 agp_bridge
.create_gatt_table
= agp_generic_create_gatt_table
;
1438 agp_bridge
.free_gatt_table
= agp_generic_free_gatt_table
;
1439 agp_bridge
.insert_memory
= agp_generic_insert_memory
;
1440 agp_bridge
.remove_memory
= agp_generic_remove_memory
;
1441 agp_bridge
.alloc_by_type
= agp_generic_alloc_by_type
;
1442 agp_bridge
.free_by_type
= agp_generic_free_by_type
;
1446 (void) pdev
; /* unused */
1449 #endif /* CONFIG_AGP_VIA */
1451 #ifdef CONFIG_AGP_SIS
1453 static int sis_fetch_size(void)
1457 aper_size_info_8
*values
;
1459 pci_read_config_byte(agp_bridge
.dev
, SIS_APSIZE
, &temp_size
);
1460 values
= A_SIZE_8(agp_bridge
.aperture_sizes
);
1461 for (i
= 0; i
< agp_bridge
.num_aperture_sizes
; i
++) {
1462 if ((temp_size
== values
[i
].size_value
) ||
1463 ((temp_size
& ~(0x03)) ==
1464 (values
[i
].size_value
& ~(0x03)))) {
1465 agp_bridge
.previous_size
=
1466 agp_bridge
.current_size
= (void *) (values
+ i
);
1468 agp_bridge
.aperture_size_idx
= i
;
1469 return values
[i
].size
;
1477 static void sis_tlbflush(agp_memory
* mem
)
1479 pci_write_config_byte(agp_bridge
.dev
, SIS_TLBFLUSH
, 0x02);
1482 static int sis_configure(void)
1485 aper_size_info_8
*current_size
;
1487 current_size
= A_SIZE_8(agp_bridge
.current_size
);
1488 pci_write_config_byte(agp_bridge
.dev
, SIS_TLBCNTRL
, 0x05);
1489 pci_read_config_dword(agp_bridge
.dev
, SIS_APBASE
, &temp
);
1490 agp_bridge
.gart_bus_addr
= (temp
& PCI_BASE_ADDRESS_MEM_MASK
);
1491 pci_write_config_dword(agp_bridge
.dev
, SIS_ATTBASE
,
1492 agp_bridge
.gatt_bus_addr
);
1493 pci_write_config_byte(agp_bridge
.dev
, SIS_APSIZE
,
1494 current_size
->size_value
);
1498 static void sis_cleanup(void)
1500 aper_size_info_8
*previous_size
;
1502 previous_size
= A_SIZE_8(agp_bridge
.previous_size
);
1503 pci_write_config_byte(agp_bridge
.dev
, SIS_APSIZE
,
1504 (previous_size
->size_value
& ~(0x03)));
1507 static unsigned long sis_mask_memory(unsigned long addr
, int type
)
1509 /* Memory type is ignored */
1511 return addr
| agp_bridge
.masks
[0].mask
;
1514 static aper_size_info_8 sis_generic_sizes
[7] =
1516 {256, 65536, 6, 99},
1517 {128, 32768, 5, 83},
1525 static gatt_mask sis_generic_masks
[] =
1530 static int __init
sis_generic_setup (struct pci_dev
*pdev
)
1532 agp_bridge
.masks
= sis_generic_masks
;
1533 agp_bridge
.num_of_masks
= 1;
1534 agp_bridge
.aperture_sizes
= (void *) sis_generic_sizes
;
1535 agp_bridge
.size_type
= U8_APER_SIZE
;
1536 agp_bridge
.num_aperture_sizes
= 7;
1537 agp_bridge
.dev_private_data
= NULL
;
1538 agp_bridge
.needs_scratch_page
= FALSE
;
1539 agp_bridge
.configure
= sis_configure
;
1540 agp_bridge
.fetch_size
= sis_fetch_size
;
1541 agp_bridge
.cleanup
= sis_cleanup
;
1542 agp_bridge
.tlb_flush
= sis_tlbflush
;
1543 agp_bridge
.mask_memory
= sis_mask_memory
;
1544 agp_bridge
.agp_enable
= agp_generic_agp_enable
;
1545 agp_bridge
.cache_flush
= global_cache_flush
;
1546 agp_bridge
.create_gatt_table
= agp_generic_create_gatt_table
;
1547 agp_bridge
.free_gatt_table
= agp_generic_free_gatt_table
;
1548 agp_bridge
.insert_memory
= agp_generic_insert_memory
;
1549 agp_bridge
.remove_memory
= agp_generic_remove_memory
;
1550 agp_bridge
.alloc_by_type
= agp_generic_alloc_by_type
;
1551 agp_bridge
.free_by_type
= agp_generic_free_by_type
;
1556 #endif /* CONFIG_AGP_SIS */
1558 #ifdef CONFIG_AGP_AMD
/* One page of GATT: the kernel-virtual page and its uncached
 * ioremap alias (the hardware must see uncached writes). */
typedef struct _amd_page_map {
	unsigned long *real;
	unsigned long *remapped;
} amd_page_map;
1565 static struct _amd_irongate_private
{
1566 volatile u8
*registers
;
1567 amd_page_map
**gatt_pages
;
1569 } amd_irongate_private
;
1571 static int amd_create_page_map(amd_page_map
*page_map
)
1575 page_map
->real
= (unsigned long *) __get_free_page(GFP_KERNEL
);
1576 if (page_map
->real
== NULL
) {
1579 set_bit(PG_reserved
, &virt_to_page(page_map
->real
)->flags
);
1581 page_map
->remapped
= ioremap_nocache(virt_to_phys(page_map
->real
),
1583 if (page_map
->remapped
== NULL
) {
1584 clear_bit(PG_reserved
,
1585 &virt_to_page(page_map
->real
)->flags
);
1586 free_page((unsigned long) page_map
->real
);
1587 page_map
->real
= NULL
;
1592 for(i
= 0; i
< PAGE_SIZE
/ sizeof(unsigned long); i
++) {
1593 page_map
->remapped
[i
] = agp_bridge
.scratch_page
;
1599 static void amd_free_page_map(amd_page_map
*page_map
)
1601 iounmap(page_map
->remapped
);
1602 clear_bit(PG_reserved
,
1603 &virt_to_page(page_map
->real
)->flags
);
1604 free_page((unsigned long) page_map
->real
);
1607 static void amd_free_gatt_pages(void)
1610 amd_page_map
**tables
;
1611 amd_page_map
*entry
;
1613 tables
= amd_irongate_private
.gatt_pages
;
1614 for(i
= 0; i
< amd_irongate_private
.num_tables
; i
++) {
1616 if (entry
!= NULL
) {
1617 if (entry
->real
!= NULL
) {
1618 amd_free_page_map(entry
);
1626 static int amd_create_gatt_pages(int nr_tables
)
1628 amd_page_map
**tables
;
1629 amd_page_map
*entry
;
1633 tables
= kmalloc((nr_tables
+ 1) * sizeof(amd_page_map
*),
1635 if (tables
== NULL
) {
1638 memset(tables
, 0, sizeof(amd_page_map
*) * (nr_tables
+ 1));
1639 for (i
= 0; i
< nr_tables
; i
++) {
1640 entry
= kmalloc(sizeof(amd_page_map
), GFP_KERNEL
);
1641 if (entry
== NULL
) {
1645 memset(entry
, 0, sizeof(amd_page_map
));
1647 retval
= amd_create_page_map(entry
);
1648 if (retval
!= 0) break;
1650 amd_irongate_private
.num_tables
= nr_tables
;
1651 amd_irongate_private
.gatt_pages
= tables
;
1653 if (retval
!= 0) amd_free_gatt_pages();
/* Since we don't need contiguous memory we just try
 * to get the gatt table once
1662 #define GET_PAGE_DIR_OFF(addr) (addr >> 22)
1663 #define GET_PAGE_DIR_IDX(addr) (GET_PAGE_DIR_OFF(addr) - \
1664 GET_PAGE_DIR_OFF(agp_bridge.gart_bus_addr))
1665 #define GET_GATT_OFF(addr) ((addr & 0x003ff000) >> 12)
1666 #define GET_GATT(addr) (amd_irongate_private.gatt_pages[\
1667 GET_PAGE_DIR_IDX(addr)]->remapped)
1669 static int amd_create_gatt_table(void)
1671 aper_size_info_lvl2
*value
;
1672 amd_page_map page_dir
;
1678 value
= A_SIZE_LVL2(agp_bridge
.current_size
);
1679 retval
= amd_create_page_map(&page_dir
);
1684 retval
= amd_create_gatt_pages(value
->num_entries
/ 1024);
1686 amd_free_page_map(&page_dir
);
1690 agp_bridge
.gatt_table_real
= page_dir
.real
;
1691 agp_bridge
.gatt_table
= page_dir
.remapped
;
1692 agp_bridge
.gatt_bus_addr
= virt_to_bus(page_dir
.real
);
1694 /* Get the address for the gart region.
1695 * This is a bus address even on the alpha, b/c its
1696 * used to program the agp master not the cpu
1699 pci_read_config_dword(agp_bridge
.dev
, AMD_APBASE
, &temp
);
1700 addr
= (temp
& PCI_BASE_ADDRESS_MEM_MASK
);
1701 agp_bridge
.gart_bus_addr
= addr
;
1703 /* Calculate the agp offset */
1704 for(i
= 0; i
< value
->num_entries
/ 1024; i
++, addr
+= 0x00400000) {
1705 page_dir
.remapped
[GET_PAGE_DIR_OFF(addr
)] =
1706 virt_to_bus(amd_irongate_private
.gatt_pages
[i
]->real
);
1707 page_dir
.remapped
[GET_PAGE_DIR_OFF(addr
)] |= 0x00000001;
1713 static int amd_free_gatt_table(void)
1715 amd_page_map page_dir
;
1717 page_dir
.real
= agp_bridge
.gatt_table_real
;
1718 page_dir
.remapped
= agp_bridge
.gatt_table
;
1720 amd_free_gatt_pages();
1721 amd_free_page_map(&page_dir
);
1725 static int amd_irongate_fetch_size(void)
1729 aper_size_info_lvl2
*values
;
1731 pci_read_config_dword(agp_bridge
.dev
, AMD_APSIZE
, &temp
);
1732 temp
= (temp
& 0x0000000e);
1733 values
= A_SIZE_LVL2(agp_bridge
.aperture_sizes
);
1734 for (i
= 0; i
< agp_bridge
.num_aperture_sizes
; i
++) {
1735 if (temp
== values
[i
].size_value
) {
1736 agp_bridge
.previous_size
=
1737 agp_bridge
.current_size
= (void *) (values
+ i
);
1739 agp_bridge
.aperture_size_idx
= i
;
1740 return values
[i
].size
;
1747 static int amd_irongate_configure(void)
1749 aper_size_info_lvl2
*current_size
;
1753 current_size
= A_SIZE_LVL2(agp_bridge
.current_size
);
1755 /* Get the memory mapped registers */
1756 pci_read_config_dword(agp_bridge
.dev
, AMD_MMBASE
, &temp
);
1757 temp
= (temp
& PCI_BASE_ADDRESS_MEM_MASK
);
1758 amd_irongate_private
.registers
= (volatile u8
*) ioremap(temp
, 4096);
1760 /* Write out the address of the gatt table */
1761 OUTREG32(amd_irongate_private
.registers
, AMD_ATTBASE
,
1762 agp_bridge
.gatt_bus_addr
);
1764 /* Write the Sync register */
1765 pci_write_config_byte(agp_bridge
.dev
, AMD_MODECNTL
, 0x80);
1767 /* Set indexing mode */
1768 pci_write_config_byte(agp_bridge
.dev
, AMD_MODECNTL2
, 0x00);
1770 /* Write the enable register */
1771 enable_reg
= INREG16(amd_irongate_private
.registers
, AMD_GARTENABLE
);
1772 enable_reg
= (enable_reg
| 0x0004);
1773 OUTREG16(amd_irongate_private
.registers
, AMD_GARTENABLE
, enable_reg
);
1775 /* Write out the size register */
1776 pci_read_config_dword(agp_bridge
.dev
, AMD_APSIZE
, &temp
);
1777 temp
= (((temp
& ~(0x0000000e)) | current_size
->size_value
)
1779 pci_write_config_dword(agp_bridge
.dev
, AMD_APSIZE
, temp
);
1782 OUTREG32(amd_irongate_private
.registers
, AMD_TLBFLUSH
, 0x00000001);
1787 static void amd_irongate_cleanup(void)
1789 aper_size_info_lvl2
*previous_size
;
1793 previous_size
= A_SIZE_LVL2(agp_bridge
.previous_size
);
1795 enable_reg
= INREG16(amd_irongate_private
.registers
, AMD_GARTENABLE
);
1796 enable_reg
= (enable_reg
& ~(0x0004));
1797 OUTREG16(amd_irongate_private
.registers
, AMD_GARTENABLE
, enable_reg
);
1799 /* Write back the previous size and disable gart translation */
1800 pci_read_config_dword(agp_bridge
.dev
, AMD_APSIZE
, &temp
);
1801 temp
= ((temp
& ~(0x0000000f)) | previous_size
->size_value
);
1802 pci_write_config_dword(agp_bridge
.dev
, AMD_APSIZE
, temp
);
1803 iounmap((void *) amd_irongate_private
.registers
);
 * This routine could be implemented by taking the addresses
 * written to the GATT, and flushing them individually.  However
 * currently it just flushes the whole table, which is probably
 * more efficient, since agp_memory blocks can be a large number of
1814 static void amd_irongate_tlbflush(agp_memory
* temp
)
1816 OUTREG32(amd_irongate_private
.registers
, AMD_TLBFLUSH
, 0x00000001);
1819 static unsigned long amd_irongate_mask_memory(unsigned long addr
, int type
)
1821 /* Only type 0 is supported by the irongate */
1823 return addr
| agp_bridge
.masks
[0].mask
;
1826 static int amd_insert_memory(agp_memory
* mem
,
1827 off_t pg_start
, int type
)
1829 int i
, j
, num_entries
;
1830 unsigned long *cur_gatt
;
1833 num_entries
= A_SIZE_LVL2(agp_bridge
.current_size
)->num_entries
;
1835 if (type
!= 0 || mem
->type
!= 0) {
1838 if ((pg_start
+ mem
->page_count
) > num_entries
) {
1843 while (j
< (pg_start
+ mem
->page_count
)) {
1844 addr
= (j
* PAGE_SIZE
) + agp_bridge
.gart_bus_addr
;
1845 cur_gatt
= GET_GATT(addr
);
1846 if (!PGE_EMPTY(cur_gatt
[GET_GATT_OFF(addr
)])) {
1852 if (mem
->is_flushed
== FALSE
) {
1854 mem
->is_flushed
= TRUE
;
1857 for (i
= 0, j
= pg_start
; i
< mem
->page_count
; i
++, j
++) {
1858 addr
= (j
* PAGE_SIZE
) + agp_bridge
.gart_bus_addr
;
1859 cur_gatt
= GET_GATT(addr
);
1860 cur_gatt
[GET_GATT_OFF(addr
)] = mem
->memory
[i
];
1862 agp_bridge
.tlb_flush(mem
);
1866 static int amd_remove_memory(agp_memory
* mem
, off_t pg_start
,
1870 unsigned long *cur_gatt
;
1873 if (type
!= 0 || mem
->type
!= 0) {
1876 for (i
= pg_start
; i
< (mem
->page_count
+ pg_start
); i
++) {
1877 addr
= (i
* PAGE_SIZE
) + agp_bridge
.gart_bus_addr
;
1878 cur_gatt
= GET_GATT(addr
);
1879 cur_gatt
[GET_GATT_OFF(addr
)] =
1880 (unsigned long) agp_bridge
.scratch_page
;
1883 agp_bridge
.tlb_flush(mem
);
1887 static aper_size_info_lvl2 amd_irongate_sizes
[7] =
1889 {2048, 524288, 0x0000000c},
1890 {1024, 262144, 0x0000000a},
1891 {512, 131072, 0x00000008},
1892 {256, 65536, 0x00000006},
1893 {128, 32768, 0x00000004},
1894 {64, 16384, 0x00000002},
1895 {32, 8192, 0x00000000}
1898 static gatt_mask amd_irongate_masks
[] =
1903 static int __init
amd_irongate_setup (struct pci_dev
*pdev
)
1905 agp_bridge
.masks
= amd_irongate_masks
;
1906 agp_bridge
.num_of_masks
= 1;
1907 agp_bridge
.aperture_sizes
= (void *) amd_irongate_sizes
;
1908 agp_bridge
.size_type
= LVL2_APER_SIZE
;
1909 agp_bridge
.num_aperture_sizes
= 7;
1910 agp_bridge
.dev_private_data
= (void *) &amd_irongate_private
;
1911 agp_bridge
.needs_scratch_page
= FALSE
;
1912 agp_bridge
.configure
= amd_irongate_configure
;
1913 agp_bridge
.fetch_size
= amd_irongate_fetch_size
;
1914 agp_bridge
.cleanup
= amd_irongate_cleanup
;
1915 agp_bridge
.tlb_flush
= amd_irongate_tlbflush
;
1916 agp_bridge
.mask_memory
= amd_irongate_mask_memory
;
1917 agp_bridge
.agp_enable
= agp_generic_agp_enable
;
1918 agp_bridge
.cache_flush
= global_cache_flush
;
1919 agp_bridge
.create_gatt_table
= amd_create_gatt_table
;
1920 agp_bridge
.free_gatt_table
= amd_free_gatt_table
;
1921 agp_bridge
.insert_memory
= amd_insert_memory
;
1922 agp_bridge
.remove_memory
= amd_remove_memory
;
1923 agp_bridge
.alloc_by_type
= agp_generic_alloc_by_type
;
1924 agp_bridge
.free_by_type
= agp_generic_free_by_type
;
1928 (void) pdev
; /* unused */
1931 #endif /* CONFIG_AGP_AMD */
1933 #ifdef CONFIG_AGP_ALI
1935 static int ali_fetch_size(void)
1939 aper_size_info_32
*values
;
1941 pci_read_config_dword(agp_bridge
.dev
, ALI_ATTBASE
, &temp
);
1942 temp
&= ~(0xfffffff0);
1943 values
= A_SIZE_32(agp_bridge
.aperture_sizes
);
1945 for (i
= 0; i
< agp_bridge
.num_aperture_sizes
; i
++) {
1946 if (temp
== values
[i
].size_value
) {
1947 agp_bridge
.previous_size
=
1948 agp_bridge
.current_size
= (void *) (values
+ i
);
1949 agp_bridge
.aperture_size_idx
= i
;
1950 return values
[i
].size
;
1957 static void ali_tlbflush(agp_memory
* mem
)
1961 pci_read_config_dword(agp_bridge
.dev
, ALI_TLBCTRL
, &temp
);
1962 pci_write_config_dword(agp_bridge
.dev
, ALI_TLBCTRL
,
1963 ((temp
& 0xffffff00) | 0x00000090));
1964 pci_write_config_dword(agp_bridge
.dev
, ALI_TLBCTRL
,
1965 ((temp
& 0xffffff00) | 0x00000010));
1968 static void ali_cleanup(void)
1970 aper_size_info_32
*previous_size
;
1973 previous_size
= A_SIZE_32(agp_bridge
.previous_size
);
1975 pci_read_config_dword(agp_bridge
.dev
, ALI_TLBCTRL
, &temp
);
1976 pci_write_config_dword(agp_bridge
.dev
, ALI_TLBCTRL
,
1977 ((temp
& 0xffffff00) | 0x00000090));
1978 pci_write_config_dword(agp_bridge
.dev
, ALI_ATTBASE
,
1979 previous_size
->size_value
);
1982 static int ali_configure(void)
1985 aper_size_info_32
*current_size
;
1987 current_size
= A_SIZE_32(agp_bridge
.current_size
);
1989 /* aperture size and gatt addr */
1990 pci_write_config_dword(agp_bridge
.dev
, ALI_ATTBASE
,
1991 agp_bridge
.gatt_bus_addr
| current_size
->size_value
);
1994 pci_read_config_dword(agp_bridge
.dev
, ALI_TLBCTRL
, &temp
);
1995 pci_write_config_dword(agp_bridge
.dev
, ALI_TLBCTRL
,
1996 ((temp
& 0xffffff00) | 0x00000010));
1998 /* address to map to */
1999 pci_read_config_dword(agp_bridge
.dev
, ALI_APBASE
, &temp
);
2000 agp_bridge
.gart_bus_addr
= (temp
& PCI_BASE_ADDRESS_MEM_MASK
);
2004 static unsigned long ali_mask_memory(unsigned long addr
, int type
)
2006 /* Memory type is ignored */
2008 return addr
| agp_bridge
.masks
[0].mask
;
2012 /* Setup function */
2013 static gatt_mask ali_generic_masks
[] =
2018 static aper_size_info_32 ali_generic_sizes
[7] =
2020 {256, 65536, 6, 10},
2029 static int __init
ali_generic_setup (struct pci_dev
*pdev
)
2031 agp_bridge
.masks
= ali_generic_masks
;
2032 agp_bridge
.num_of_masks
= 1;
2033 agp_bridge
.aperture_sizes
= (void *) ali_generic_sizes
;
2034 agp_bridge
.size_type
= U32_APER_SIZE
;
2035 agp_bridge
.num_aperture_sizes
= 7;
2036 agp_bridge
.dev_private_data
= NULL
;
2037 agp_bridge
.needs_scratch_page
= FALSE
;
2038 agp_bridge
.configure
= ali_configure
;
2039 agp_bridge
.fetch_size
= ali_fetch_size
;
2040 agp_bridge
.cleanup
= ali_cleanup
;
2041 agp_bridge
.tlb_flush
= ali_tlbflush
;
2042 agp_bridge
.mask_memory
= ali_mask_memory
;
2043 agp_bridge
.agp_enable
= agp_generic_agp_enable
;
2044 agp_bridge
.cache_flush
= global_cache_flush
;
2045 agp_bridge
.create_gatt_table
= agp_generic_create_gatt_table
;
2046 agp_bridge
.free_gatt_table
= agp_generic_free_gatt_table
;
2047 agp_bridge
.insert_memory
= agp_generic_insert_memory
;
2048 agp_bridge
.remove_memory
= agp_generic_remove_memory
;
2049 agp_bridge
.alloc_by_type
= agp_generic_alloc_by_type
;
2050 agp_bridge
.free_by_type
= agp_generic_free_by_type
;
2054 (void) pdev
; /* unused */
2057 #endif /* CONFIG_AGP_ALI */
2060 /* per-chipset initialization data.
2061 * note -- all chipsets for a single vendor MUST be grouped together
2064 unsigned short device_id
; /* first, to make table easier to read */
2065 unsigned short vendor_id
;
2066 enum chipset_type chipset
;
2067 const char *vendor_name
;
2068 const char *chipset_name
;
2069 int (*chipset_setup
) (struct pci_dev
*pdev
);
2070 } agp_bridge_info
[] __initdata
= {
2072 #ifdef CONFIG_AGP_ALI
2073 { PCI_DEVICE_ID_AL_M1541_0
,
2078 ali_generic_setup
},
2084 ali_generic_setup
},
2085 #endif /* CONFIG_AGP_ALI */
2087 #ifdef CONFIG_AGP_AMD
2088 { PCI_DEVICE_ID_AMD_IRONGATE_0
,
2093 amd_irongate_setup
},
2099 amd_irongate_setup
},
2100 #endif /* CONFIG_AGP_AMD */
2102 #ifdef CONFIG_AGP_INTEL
2103 { PCI_DEVICE_ID_INTEL_82443LX_0
,
2104 PCI_VENDOR_ID_INTEL
,
2108 intel_generic_setup
},
2109 { PCI_DEVICE_ID_INTEL_82443BX_0
,
2110 PCI_VENDOR_ID_INTEL
,
2114 intel_generic_setup
},
2115 { PCI_DEVICE_ID_INTEL_82443GX_0
,
2116 PCI_VENDOR_ID_INTEL
,
2120 intel_generic_setup
},
2121 /* could we add support for PCI_DEVICE_ID_INTEL_815_1 too ? */
2122 { PCI_DEVICE_ID_INTEL_815_0
,
2123 PCI_VENDOR_ID_INTEL
,
2127 intel_generic_setup
},
2128 { PCI_DEVICE_ID_INTEL_840_0
,
2129 PCI_VENDOR_ID_INTEL
,
2134 { PCI_DEVICE_ID_INTEL_850_0
,
2135 PCI_VENDOR_ID_INTEL
,
2141 PCI_VENDOR_ID_INTEL
,
2145 intel_generic_setup
},
2146 #endif /* CONFIG_AGP_INTEL */
2148 #ifdef CONFIG_AGP_SIS
2149 { PCI_DEVICE_ID_SI_630
,
2154 sis_generic_setup
},
2155 { PCI_DEVICE_ID_SI_540
,
2160 sis_generic_setup
},
2161 { PCI_DEVICE_ID_SI_620
,
2166 sis_generic_setup
},
2167 { PCI_DEVICE_ID_SI_530
,
2172 sis_generic_setup
},
2173 { PCI_DEVICE_ID_SI_630
,
2178 sis_generic_setup
},
2179 { PCI_DEVICE_ID_SI_540
,
2184 sis_generic_setup
},
2185 { PCI_DEVICE_ID_SI_620
,
2190 sis_generic_setup
},
2191 { PCI_DEVICE_ID_SI_530
,
2196 sis_generic_setup
},
2202 sis_generic_setup
},
2203 #endif /* CONFIG_AGP_SIS */
2205 #ifdef CONFIG_AGP_VIA
2206 { PCI_DEVICE_ID_VIA_8501_0
,
2211 via_generic_setup
},
2212 { PCI_DEVICE_ID_VIA_82C597_0
,
2217 via_generic_setup
},
2218 { PCI_DEVICE_ID_VIA_82C598_0
,
2223 via_generic_setup
},
2224 { PCI_DEVICE_ID_VIA_82C691_0
,
2229 via_generic_setup
},
2230 { PCI_DEVICE_ID_VIA_8371_0
,
2235 via_generic_setup
},
2236 { PCI_DEVICE_ID_VIA_8363_0
,
2241 via_generic_setup
},
2247 via_generic_setup
},
2248 #endif /* CONFIG_AGP_VIA */
2250 { 0, }, /* dummy final entry, always present */
2254 /* scan table above for supported devices */
2255 static int __init
agp_lookup_host_bridge (struct pci_dev
*pdev
)
2259 for (i
= 0; i
< ARRAY_SIZE (agp_bridge_info
); i
++)
2260 if (pdev
->vendor
== agp_bridge_info
[i
].vendor_id
)
2263 if (i
>= ARRAY_SIZE (agp_bridge_info
)) {
2264 printk (KERN_DEBUG PFX
"unsupported bridge\n");
2268 while ((i
< ARRAY_SIZE (agp_bridge_info
)) &&
2269 (agp_bridge_info
[i
].vendor_id
== pdev
->vendor
)) {
2270 if (pdev
->device
== agp_bridge_info
[i
].device_id
) {
2271 printk (KERN_INFO PFX
"Detected %s %s chipset\n",
2272 agp_bridge_info
[i
].vendor_name
,
2273 agp_bridge_info
[i
].chipset_name
);
2274 agp_bridge
.type
= agp_bridge_info
[i
].chipset
;
2275 return agp_bridge_info
[i
].chipset_setup (pdev
);
2281 i
--; /* point to vendor generic entry (device_id == 0) */
2283 /* try init anyway, if user requests it AND
2284 * there is a 'generic' bridge entry for this vendor */
2285 if (agp_try_unsupported
&& agp_bridge_info
[i
].device_id
== 0) {
2286 printk(KERN_WARNING PFX
"Trying generic %s routines"
2287 " for device id: %04x\n",
2288 agp_bridge_info
[i
].vendor_name
, pdev
->device
);
2289 agp_bridge
.type
= agp_bridge_info
[i
].chipset
;
2290 return agp_bridge_info
[i
].chipset_setup (pdev
);
2293 printk(KERN_ERR PFX
"Unsupported %s chipset (device id: %04x),"
2294 " you might want to try agp_try_unsupported=1.\n",
2295 agp_bridge_info
[i
].vendor_name
, pdev
->device
);
2300 /* Supported Device Scanning routine */
2302 static int __init
agp_find_supported_device(void)
2304 struct pci_dev
*dev
= NULL
;
2306 u32 cap_id
, scratch
;
2308 if ((dev
= pci_find_class(PCI_CLASS_BRIDGE_HOST
<< 8, NULL
)) == NULL
)
2311 agp_bridge
.dev
= dev
;
2313 /* Need to test for I810 here */
2314 #ifdef CONFIG_AGP_I810
2315 if (dev
->vendor
== PCI_VENDOR_ID_INTEL
) {
2316 struct pci_dev
*i810_dev
;
2318 switch (dev
->device
) {
2319 case PCI_DEVICE_ID_INTEL_810_0
:
2320 i810_dev
= pci_find_device(PCI_VENDOR_ID_INTEL
,
2321 PCI_DEVICE_ID_INTEL_810_1
,
2323 if (i810_dev
== NULL
) {
2324 printk(KERN_ERR PFX
"Detected an Intel i810,"
2325 " but could not find the secondary"
2329 printk(KERN_INFO PFX
"Detected an Intel "
2331 agp_bridge
.type
= INTEL_I810
;
2332 return intel_i810_setup (i810_dev
);
2334 case PCI_DEVICE_ID_INTEL_810_DC100_0
:
2335 i810_dev
= pci_find_device(PCI_VENDOR_ID_INTEL
,
2336 PCI_DEVICE_ID_INTEL_810_DC100_1
,
2338 if (i810_dev
== NULL
) {
2339 printk(KERN_ERR PFX
"Detected an Intel i810 "
2340 "DC100, but could not find the "
2341 "secondary device.\n");
2344 printk(KERN_INFO PFX
"Detected an Intel i810 "
2345 "DC100 Chipset.\n");
2346 agp_bridge
.type
= INTEL_I810
;
2347 return intel_i810_setup(i810_dev
);
2349 case PCI_DEVICE_ID_INTEL_810_E_0
:
2350 i810_dev
= pci_find_device(PCI_VENDOR_ID_INTEL
,
2351 PCI_DEVICE_ID_INTEL_810_E_1
,
2353 if (i810_dev
== NULL
) {
2354 printk(KERN_ERR PFX
"Detected an Intel i810 E"
2355 ", but could not find the secondary "
2359 printk(KERN_INFO PFX
"Detected an Intel i810 E "
2361 agp_bridge
.type
= INTEL_I810
;
2362 return intel_i810_setup(i810_dev
);
2364 case PCI_DEVICE_ID_INTEL_815_0
:
2365 /* The i815 can operate either as an i810 style
2366 * integrated device, or as an AGP4X motherboard.
2368 * This only addresses the first mode:
2370 i810_dev
= pci_find_device(PCI_VENDOR_ID_INTEL
,
2371 PCI_DEVICE_ID_INTEL_815_1
,
2373 if (i810_dev
== NULL
) {
2374 printk(KERN_ERR PFX
"agpgart: Detected an "
2375 "Intel i815, but could not find the"
2376 " secondary device.\n");
2377 agp_bridge
.type
= NOT_SUPPORTED
;
2380 printk(KERN_INFO PFX
"agpgart: Detected an Intel i815 "
2382 agp_bridge
.type
= INTEL_I810
;
2383 return intel_i810_setup(i810_dev
);
2389 #endif /* CONFIG_AGP_I810 */
2392 pci_read_config_dword(dev
, 0x04, &scratch
);
2393 if (!(scratch
& 0x00100000))
2396 pci_read_config_byte(dev
, 0x34, &cap_ptr
);
2397 if (cap_ptr
!= 0x00) {
2399 pci_read_config_dword(dev
, cap_ptr
, &cap_id
);
2401 if ((cap_id
& 0xff) != 0x02)
2402 cap_ptr
= (cap_id
>> 8) & 0xff;
2404 while (((cap_id
& 0xff) != 0x02) && (cap_ptr
!= 0x00));
2406 if (cap_ptr
== 0x00)
2408 agp_bridge
.capndx
= cap_ptr
;
2410 /* Fill in the mode register */
2411 pci_read_config_dword(agp_bridge
.dev
,
2412 agp_bridge
.capndx
+ 4,
2415 /* probe for known chipsets */
2416 return agp_lookup_host_bridge (dev
);
2419 struct agp_max_table
{
2424 static struct agp_max_table maxes_table
[9] __initdata
=
2437 static int __init
agp_find_max (void)
2439 long memory
, index
, result
;
2441 memory
= virt_to_phys(high_memory
) >> 20;
2444 while ((memory
> maxes_table
[index
].mem
) &&
2449 result
= maxes_table
[index
- 1].agp
+
2450 ( (memory
- maxes_table
[index
- 1].mem
) *
2451 (maxes_table
[index
].agp
- maxes_table
[index
- 1].agp
)) /
2452 (maxes_table
[index
].mem
- maxes_table
[index
- 1].mem
);
2454 printk(KERN_INFO PFX
"Maximum main memory to use "
2455 "for agp memory: %ldM\n", result
);
2456 result
= result
<< (20 - PAGE_SHIFT
);
2460 #define AGPGART_VERSION_MAJOR 0
2461 #define AGPGART_VERSION_MINOR 99
2463 static agp_version agp_current_version
=
2465 AGPGART_VERSION_MAJOR
,
2466 AGPGART_VERSION_MINOR
2469 static int __init
agp_backend_initialize(void)
2471 int size_value
, rc
, got_gatt
=0, got_keylist
=0;
2473 memset(&agp_bridge
, 0, sizeof(struct agp_bridge_data
));
2474 agp_bridge
.type
= NOT_SUPPORTED
;
2475 agp_bridge
.max_memory_agp
= agp_find_max();
2476 agp_bridge
.version
= &agp_current_version
;
2478 rc
= agp_find_supported_device();
2480 /* not KERN_ERR because error msg should have already printed */
2481 printk(KERN_DEBUG PFX
"no supported devices found.\n");
2485 if (agp_bridge
.needs_scratch_page
== TRUE
) {
2486 agp_bridge
.scratch_page
= agp_alloc_page();
2488 if (agp_bridge
.scratch_page
== 0) {
2489 printk(KERN_ERR PFX
"unable to get memory for "
2493 agp_bridge
.scratch_page
=
2494 virt_to_phys((void *) agp_bridge
.scratch_page
);
2495 agp_bridge
.scratch_page
=
2496 agp_bridge
.mask_memory(agp_bridge
.scratch_page
, 0);
2499 size_value
= agp_bridge
.fetch_size();
2501 if (size_value
== 0) {
2502 printk(KERN_ERR PFX
"unable to detrimine aperture size.\n");
2506 if (agp_bridge
.create_gatt_table()) {
2507 printk(KERN_ERR PFX
"unable to get memory for graphics "
2508 "translation table.\n");
2514 agp_bridge
.key_list
= vmalloc(PAGE_SIZE
* 4);
2515 if (agp_bridge
.key_list
== NULL
) {
2516 printk(KERN_ERR PFX
"error allocating memory for key lists.\n");
2522 /* FIXME vmalloc'd memory not guaranteed contiguous */
2523 memset(agp_bridge
.key_list
, 0, PAGE_SIZE
* 4);
2525 if (agp_bridge
.configure()) {
2526 printk(KERN_ERR PFX
"error configuring host chipset.\n");
2531 printk(KERN_INFO PFX
"AGP aperture is %dM @ 0x%lx\n",
2532 size_value
, agp_bridge
.gart_bus_addr
);
2537 if (agp_bridge
.needs_scratch_page
== TRUE
) {
2538 agp_bridge
.scratch_page
&= ~(0x00000fff);
2539 agp_destroy_page((unsigned long)
2540 phys_to_virt(agp_bridge
.scratch_page
));
2543 agp_bridge
.free_gatt_table();
2545 vfree(agp_bridge
.key_list
);
2550 /* cannot be __exit b/c as it could be called from __init code */
2551 static void agp_backend_cleanup(void)
2553 agp_bridge
.cleanup();
2554 agp_bridge
.free_gatt_table();
2555 vfree(agp_bridge
.key_list
);
2557 if (agp_bridge
.needs_scratch_page
== TRUE
) {
2558 agp_bridge
.scratch_page
&= ~(0x00000fff);
2559 agp_destroy_page((unsigned long)
2560 phys_to_virt(agp_bridge
.scratch_page
));
2564 extern int agp_frontend_initialize(void);
2565 extern void agp_frontend_cleanup(void);
2567 static const drm_agp_t drm_agp
= {
2569 &agp_allocate_memory
,
2573 &agp_backend_acquire
,
2574 &agp_backend_release
,
2578 static int __init
agp_init(void)
2582 printk(KERN_INFO
"Linux agpgart interface v%d.%d (c) Jeff Hartmann\n",
2583 AGPGART_VERSION_MAJOR
, AGPGART_VERSION_MINOR
);
2585 ret_val
= agp_backend_initialize();
2587 agp_bridge
.type
= NOT_SUPPORTED
;
2590 ret_val
= agp_frontend_initialize();
2592 agp_bridge
.type
= NOT_SUPPORTED
;
2593 agp_backend_cleanup();
2597 inter_module_register("drm_agp", THIS_MODULE
, &drm_agp
);
2601 static void __exit
agp_cleanup(void)
2603 agp_frontend_cleanup();
2604 agp_backend_cleanup();
2605 inter_module_unregister("drm_agp");
module_init(agp_init);
module_exit(agp_cleanup);