/*
 * AMD K7 AGPGART routines.
 */
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/agp_backend.h>
#include <linux/page-flags.h>
#include <linux/mm.h>
#include "agp.h"
/* PCI configuration space offsets (first two) and offsets into the
 * memory-mapped GART register window of the AMD northbridge. */
#define AMD_MMBASE	0x14
#define AMD_APSIZE	0xac
#define AMD_MODECNTL	0xb0
#define AMD_MODECNTL2	0xb2
#define AMD_GARTENABLE	0x02	/* In mmio region (16-bit register) */
#define AMD_ATTBASE	0x04	/* In mmio region (32-bit register) */
#define AMD_TLBFLUSH	0x0c	/* In mmio region (32-bit register) */
#define AMD_CACHEENTRY	0x10	/* In mmio region (32-bit register) */
23 static struct pci_device_id agp_amdk7_pci_table
[];
27 unsigned long __iomem
*remapped
;
30 static struct _amd_irongate_private
{
31 volatile u8 __iomem
*registers
;
32 struct amd_page_map
**gatt_pages
;
34 } amd_irongate_private
;
36 static int amd_create_page_map(struct amd_page_map
*page_map
)
40 page_map
->real
= (unsigned long *) __get_free_page(GFP_KERNEL
);
41 if (page_map
->real
== NULL
)
44 SetPageReserved(virt_to_page(page_map
->real
));
46 page_map
->remapped
= ioremap_nocache(virt_to_phys(page_map
->real
),
48 if (page_map
->remapped
== NULL
) {
49 ClearPageReserved(virt_to_page(page_map
->real
));
50 free_page((unsigned long) page_map
->real
);
51 page_map
->real
= NULL
;
56 for (i
= 0; i
< PAGE_SIZE
/ sizeof(unsigned long); i
++)
57 writel(agp_bridge
->scratch_page
, page_map
->remapped
+i
);
62 static void amd_free_page_map(struct amd_page_map
*page_map
)
64 iounmap(page_map
->remapped
);
65 ClearPageReserved(virt_to_page(page_map
->real
));
66 free_page((unsigned long) page_map
->real
);
69 static void amd_free_gatt_pages(void)
72 struct amd_page_map
**tables
;
73 struct amd_page_map
*entry
;
75 tables
= amd_irongate_private
.gatt_pages
;
76 for (i
= 0; i
< amd_irongate_private
.num_tables
; i
++) {
79 if (entry
->real
!= NULL
)
80 amd_free_page_map(entry
);
85 amd_irongate_private
.gatt_pages
= NULL
;
88 static int amd_create_gatt_pages(int nr_tables
)
90 struct amd_page_map
**tables
;
91 struct amd_page_map
*entry
;
95 tables
= kmalloc((nr_tables
+ 1) * sizeof(struct amd_page_map
*),
100 memset (tables
, 0, sizeof(struct amd_page_map
*) * (nr_tables
+ 1));
101 for (i
= 0; i
< nr_tables
; i
++) {
102 entry
= kmalloc(sizeof(struct amd_page_map
), GFP_KERNEL
);
107 memset (entry
, 0, sizeof(struct amd_page_map
));
109 retval
= amd_create_page_map(entry
);
113 amd_irongate_private
.num_tables
= nr_tables
;
114 amd_irongate_private
.gatt_pages
= tables
;
117 amd_free_gatt_pages();
/* Since we don't need contiguous memory we just try
 * to get the gatt table once
 */

/* Address decomposition for the two-level GATT: top 10 bits index the
 * page directory, the next 10 bits index within a GATT page. */
#define GET_PAGE_DIR_OFF(addr) (addr >> 22)
#define GET_PAGE_DIR_IDX(addr) (GET_PAGE_DIR_OFF(addr) - \
	GET_PAGE_DIR_OFF(agp_bridge->gart_bus_addr))
#define GET_GATT_OFF(addr) ((addr & 0x003ff000) >> 12)
#define GET_GATT(addr) (amd_irongate_private.gatt_pages[\
	GET_PAGE_DIR_IDX(addr)]->remapped)
133 static int amd_create_gatt_table(void)
135 struct aper_size_info_lvl2
*value
;
136 struct amd_page_map page_dir
;
142 value
= A_SIZE_LVL2(agp_bridge
->current_size
);
143 retval
= amd_create_page_map(&page_dir
);
147 retval
= amd_create_gatt_pages(value
->num_entries
/ 1024);
149 amd_free_page_map(&page_dir
);
153 agp_bridge
->gatt_table_real
= (u32
*)page_dir
.real
;
154 agp_bridge
->gatt_table
= (u32 __iomem
*)page_dir
.remapped
;
155 agp_bridge
->gatt_bus_addr
= virt_to_phys(page_dir
.real
);
157 /* Get the address for the gart region.
158 * This is a bus address even on the alpha, b/c its
159 * used to program the agp master not the cpu
162 pci_read_config_dword(agp_bridge
->dev
, AGP_APBASE
, &temp
);
163 addr
= (temp
& PCI_BASE_ADDRESS_MEM_MASK
);
164 agp_bridge
->gart_bus_addr
= addr
;
166 /* Calculate the agp offset */
167 for (i
= 0; i
< value
->num_entries
/ 1024; i
++, addr
+= 0x00400000) {
168 writel(virt_to_phys(amd_irongate_private
.gatt_pages
[i
]->real
) | 1,
169 page_dir
.remapped
+GET_PAGE_DIR_OFF(addr
));
175 static int amd_free_gatt_table(void)
177 struct amd_page_map page_dir
;
179 page_dir
.real
= (unsigned long *)agp_bridge
->gatt_table_real
;
180 page_dir
.remapped
= (unsigned long __iomem
*)agp_bridge
->gatt_table
;
182 amd_free_gatt_pages();
183 amd_free_page_map(&page_dir
);
187 static int amd_irongate_fetch_size(void)
191 struct aper_size_info_lvl2
*values
;
193 pci_read_config_dword(agp_bridge
->dev
, AMD_APSIZE
, &temp
);
194 temp
= (temp
& 0x0000000e);
195 values
= A_SIZE_LVL2(agp_bridge
->driver
->aperture_sizes
);
196 for (i
= 0; i
< agp_bridge
->driver
->num_aperture_sizes
; i
++) {
197 if (temp
== values
[i
].size_value
) {
198 agp_bridge
->previous_size
=
199 agp_bridge
->current_size
= (void *) (values
+ i
);
201 agp_bridge
->aperture_size_idx
= i
;
202 return values
[i
].size
;
209 static int amd_irongate_configure(void)
211 struct aper_size_info_lvl2
*current_size
;
215 current_size
= A_SIZE_LVL2(agp_bridge
->current_size
);
217 /* Get the memory mapped registers */
218 pci_read_config_dword(agp_bridge
->dev
, AMD_MMBASE
, &temp
);
219 temp
= (temp
& PCI_BASE_ADDRESS_MEM_MASK
);
220 amd_irongate_private
.registers
= (volatile u8 __iomem
*) ioremap(temp
, 4096);
222 /* Write out the address of the gatt table */
223 OUTREG32(amd_irongate_private
.registers
, AMD_ATTBASE
,
224 agp_bridge
->gatt_bus_addr
);
226 /* Write the Sync register */
227 pci_write_config_byte(agp_bridge
->dev
, AMD_MODECNTL
, 0x80);
229 /* Set indexing mode */
230 pci_write_config_byte(agp_bridge
->dev
, AMD_MODECNTL2
, 0x00);
232 /* Write the enable register */
233 enable_reg
= INREG16(amd_irongate_private
.registers
, AMD_GARTENABLE
);
234 enable_reg
= (enable_reg
| 0x0004);
235 OUTREG16(amd_irongate_private
.registers
, AMD_GARTENABLE
, enable_reg
);
237 /* Write out the size register */
238 pci_read_config_dword(agp_bridge
->dev
, AMD_APSIZE
, &temp
);
239 temp
= (((temp
& ~(0x0000000e)) | current_size
->size_value
)
241 pci_write_config_dword(agp_bridge
->dev
, AMD_APSIZE
, temp
);
244 OUTREG32(amd_irongate_private
.registers
, AMD_TLBFLUSH
, 0x00000001);
249 static void amd_irongate_cleanup(void)
251 struct aper_size_info_lvl2
*previous_size
;
255 previous_size
= A_SIZE_LVL2(agp_bridge
->previous_size
);
257 enable_reg
= INREG16(amd_irongate_private
.registers
, AMD_GARTENABLE
);
258 enable_reg
= (enable_reg
& ~(0x0004));
259 OUTREG16(amd_irongate_private
.registers
, AMD_GARTENABLE
, enable_reg
);
261 /* Write back the previous size and disable gart translation */
262 pci_read_config_dword(agp_bridge
->dev
, AMD_APSIZE
, &temp
);
263 temp
= ((temp
& ~(0x0000000f)) | previous_size
->size_value
);
264 pci_write_config_dword(agp_bridge
->dev
, AMD_APSIZE
, temp
);
265 iounmap((void __iomem
*) amd_irongate_private
.registers
);
269 * This routine could be implemented by taking the addresses
270 * written to the GATT, and flushing them individually. However
271 * currently it just flushes the whole table. Which is probably
272 * more efficent, since agp_memory blocks can be a large number of
276 static void amd_irongate_tlbflush(struct agp_memory
*temp
)
278 OUTREG32(amd_irongate_private
.registers
, AMD_TLBFLUSH
, 0x00000001);
281 static int amd_insert_memory(struct agp_memory
*mem
, off_t pg_start
, int type
)
283 int i
, j
, num_entries
;
284 unsigned long __iomem
*cur_gatt
;
287 num_entries
= A_SIZE_LVL2(agp_bridge
->current_size
)->num_entries
;
289 if (type
!= 0 || mem
->type
!= 0)
292 if ((pg_start
+ mem
->page_count
) > num_entries
)
296 while (j
< (pg_start
+ mem
->page_count
)) {
297 addr
= (j
* PAGE_SIZE
) + agp_bridge
->gart_bus_addr
;
298 cur_gatt
= GET_GATT(addr
);
299 if (!PGE_EMPTY(agp_bridge
, readl(cur_gatt
+GET_GATT_OFF(addr
))))
304 if (mem
->is_flushed
== FALSE
) {
305 global_cache_flush();
306 mem
->is_flushed
= TRUE
;
309 for (i
= 0, j
= pg_start
; i
< mem
->page_count
; i
++, j
++) {
310 addr
= (j
* PAGE_SIZE
) + agp_bridge
->gart_bus_addr
;
311 cur_gatt
= GET_GATT(addr
);
312 writel(agp_generic_mask_memory(mem
->memory
[i
], mem
->type
), cur_gatt
+GET_GATT_OFF(addr
));
314 amd_irongate_tlbflush(mem
);
318 static int amd_remove_memory(struct agp_memory
*mem
, off_t pg_start
, int type
)
321 unsigned long __iomem
*cur_gatt
;
324 if (type
!= 0 || mem
->type
!= 0)
327 for (i
= pg_start
; i
< (mem
->page_count
+ pg_start
); i
++) {
328 addr
= (i
* PAGE_SIZE
) + agp_bridge
->gart_bus_addr
;
329 cur_gatt
= GET_GATT(addr
);
330 writel(agp_bridge
->scratch_page
, cur_gatt
+GET_GATT_OFF(addr
));
333 amd_irongate_tlbflush(mem
);
337 static struct aper_size_info_lvl2 amd_irongate_sizes
[7] =
339 {2048, 524288, 0x0000000c},
340 {1024, 262144, 0x0000000a},
341 {512, 131072, 0x00000008},
342 {256, 65536, 0x00000006},
343 {128, 32768, 0x00000004},
344 {64, 16384, 0x00000002},
345 {32, 8192, 0x00000000}
348 static struct gatt_mask amd_irongate_masks
[] =
350 {.mask
= 1, .type
= 0}
353 struct agp_bridge_driver amd_irongate_driver
= {
354 .owner
= THIS_MODULE
,
355 .aperture_sizes
= amd_irongate_sizes
,
356 .size_type
= LVL2_APER_SIZE
,
357 .num_aperture_sizes
= 7,
358 .configure
= amd_irongate_configure
,
359 .fetch_size
= amd_irongate_fetch_size
,
360 .cleanup
= amd_irongate_cleanup
,
361 .tlb_flush
= amd_irongate_tlbflush
,
362 .mask_memory
= agp_generic_mask_memory
,
363 .masks
= amd_irongate_masks
,
364 .agp_enable
= agp_generic_enable
,
365 .cache_flush
= global_cache_flush
,
366 .create_gatt_table
= amd_create_gatt_table
,
367 .free_gatt_table
= amd_free_gatt_table
,
368 .insert_memory
= amd_insert_memory
,
369 .remove_memory
= amd_remove_memory
,
370 .alloc_by_type
= agp_generic_alloc_by_type
,
371 .free_by_type
= agp_generic_free_by_type
,
372 .agp_alloc_page
= agp_generic_alloc_page
,
373 .agp_destroy_page
= agp_generic_destroy_page
,
376 static struct agp_device_ids amd_agp_device_ids
[] __devinitdata
=
379 .device_id
= PCI_DEVICE_ID_AMD_FE_GATE_7006
,
380 .chipset_name
= "Irongate",
383 .device_id
= PCI_DEVICE_ID_AMD_FE_GATE_700E
,
384 .chipset_name
= "761",
387 .device_id
= PCI_DEVICE_ID_AMD_FE_GATE_700C
,
388 .chipset_name
= "760MP",
390 { }, /* dummy final entry, always present */
393 static int __devinit
agp_amdk7_probe(struct pci_dev
*pdev
,
394 const struct pci_device_id
*ent
)
396 struct agp_bridge_data
*bridge
;
400 cap_ptr
= pci_find_capability(pdev
, PCI_CAP_ID_AGP
);
404 j
= ent
- agp_amdk7_pci_table
;
405 printk(KERN_INFO PFX
"Detected AMD %s chipset\n",
406 amd_agp_device_ids
[j
].chipset_name
);
408 bridge
= agp_alloc_bridge();
412 bridge
->driver
= &amd_irongate_driver
;
413 bridge
->dev_private_data
= &amd_irongate_private
,
415 bridge
->capndx
= cap_ptr
;
417 /* Fill in the mode register */
418 pci_read_config_dword(pdev
,
419 bridge
->capndx
+PCI_AGP_STATUS
,
422 pci_set_drvdata(pdev
, bridge
);
423 return agp_add_bridge(bridge
);
426 static void __devexit
agp_amdk7_remove(struct pci_dev
*pdev
)
428 struct agp_bridge_data
*bridge
= pci_get_drvdata(pdev
);
430 agp_remove_bridge(bridge
);
431 agp_put_bridge(bridge
);
434 /* must be the same order as name table above */
435 static struct pci_device_id agp_amdk7_pci_table
[] = {
437 .class = (PCI_CLASS_BRIDGE_HOST
<< 8),
439 .vendor
= PCI_VENDOR_ID_AMD
,
440 .device
= PCI_DEVICE_ID_AMD_FE_GATE_7006
,
441 .subvendor
= PCI_ANY_ID
,
442 .subdevice
= PCI_ANY_ID
,
445 .class = (PCI_CLASS_BRIDGE_HOST
<< 8),
447 .vendor
= PCI_VENDOR_ID_AMD
,
448 .device
= PCI_DEVICE_ID_AMD_FE_GATE_700E
,
449 .subvendor
= PCI_ANY_ID
,
450 .subdevice
= PCI_ANY_ID
,
453 .class = (PCI_CLASS_BRIDGE_HOST
<< 8),
455 .vendor
= PCI_VENDOR_ID_AMD
,
456 .device
= PCI_DEVICE_ID_AMD_FE_GATE_700C
,
457 .subvendor
= PCI_ANY_ID
,
458 .subdevice
= PCI_ANY_ID
,
463 MODULE_DEVICE_TABLE(pci
, agp_amdk7_pci_table
);
465 static struct pci_driver agp_amdk7_pci_driver
= {
466 .name
= "agpgart-amdk7",
467 .id_table
= agp_amdk7_pci_table
,
468 .probe
= agp_amdk7_probe
,
469 .remove
= agp_amdk7_remove
,
472 static int __init
agp_amdk7_init(void)
474 return pci_module_init(&agp_amdk7_pci_driver
);
477 static void __exit
agp_amdk7_cleanup(void)
479 pci_unregister_driver(&agp_amdk7_pci_driver
);
482 module_init(agp_amdk7_init
);
483 module_exit(agp_amdk7_cleanup
);
485 MODULE_LICENSE("GPL and additional rights");