/*
 * AMD K7 AGPGART routines.
 */

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/agp_backend.h>
#include <linux/page-flags.h>
#include <linux/mm.h>
#include "agp.h"

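/*
 * Irongate registers: AMD_MMBASE through AMD_MODECNTL2 are offsets into the
 * bridge's PCI configuration space; the remaining offsets are into the MMIO
 * region whose base address is read from the AMD_MMBASE BAR.
 */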
#define AMD_MMBASE	0x14
#define AMD_APSIZE	0xac
#define AMD_MODECNTL	0xb0
#define AMD_MODECNTL2	0xb2
#define AMD_GARTENABLE	0x02	/* In mmio region (16-bit register) */
#define AMD_ATTBASE	0x04	/* In mmio region (32-bit register) */
#define AMD_TLBFLUSH	0x0c	/* In mmio region (32-bit register) */
#define AMD_CACHEENTRY	0x10	/* In mmio region (32-bit register) */

static struct pci_device_id agp_amdk7_pci_table[];

struct amd_page_map {
	unsigned long *real;
	unsigned long __iomem *remapped;
};

static struct _amd_irongate_private {
	volatile u8 __iomem *registers;
	struct amd_page_map **gatt_pages;
	int num_tables;
} amd_irongate_private;

static int amd_create_page_map(struct amd_page_map *page_map)
{
	int i;

	page_map->real = (unsigned long *) __get_free_page(GFP_KERNEL);
	if (page_map->real == NULL)
		return -ENOMEM;

	SetPageReserved(virt_to_page(page_map->real));
	global_cache_flush();
	page_map->remapped = ioremap_nocache(virt_to_gart(page_map->real),
					     PAGE_SIZE);
	if (page_map->remapped == NULL) {
		ClearPageReserved(virt_to_page(page_map->real));
		free_page((unsigned long) page_map->real);
		page_map->real = NULL;
		return -ENOMEM;
	}
	global_cache_flush();

	for (i = 0; i < PAGE_SIZE / sizeof(unsigned long); i++) {
		writel(agp_bridge->scratch_page, page_map->remapped+i);
		readl(page_map->remapped+i);	/* PCI Posting. */
	}

	return 0;
}

static void amd_free_page_map(struct amd_page_map *page_map)
{
	iounmap(page_map->remapped);
	ClearPageReserved(virt_to_page(page_map->real));
	free_page((unsigned long) page_map->real);
}

static void amd_free_gatt_pages(void)
{
	int i;
	struct amd_page_map **tables;
	struct amd_page_map *entry;

	tables = amd_irongate_private.gatt_pages;
	for (i = 0; i < amd_irongate_private.num_tables; i++) {
		entry = tables[i];
		if (entry != NULL) {
			if (entry->real != NULL)
				amd_free_page_map(entry);
			kfree(entry);
			tables[i] = NULL;
		}
	}
	kfree(tables);
	amd_irongate_private.gatt_pages = NULL;
}

static int amd_create_gatt_pages(int nr_tables)
{
	struct amd_page_map **tables;
	struct amd_page_map *entry;
	int retval = 0;
	int i;

	tables = kzalloc((nr_tables + 1) * sizeof(struct amd_page_map *), GFP_KERNEL);
	if (tables == NULL)
		return -ENOMEM;

	for (i = 0; i < nr_tables; i++) {
		entry = kzalloc(sizeof(struct amd_page_map), GFP_KERNEL);
		if (entry == NULL) {
			retval = -ENOMEM;
			break;
		}
		tables[i] = entry;
		retval = amd_create_page_map(entry);
		if (retval != 0)
			break;
	}
	amd_irongate_private.num_tables = nr_tables;
	amd_irongate_private.gatt_pages = tables;

	if (retval != 0)
		amd_free_gatt_pages();

	return retval;
}

/* Since we don't need contiguous memory we just try
 * to get the gatt table once
 */

#define GET_PAGE_DIR_OFF(addr) (addr >> 22)
#define GET_PAGE_DIR_IDX(addr) (GET_PAGE_DIR_OFF(addr) - \
	GET_PAGE_DIR_OFF(agp_bridge->gart_bus_addr))
#define GET_GATT_OFF(addr) ((addr & 0x003ff000) >> 12)
#define GET_GATT(addr) (amd_irongate_private.gatt_pages[\
	GET_PAGE_DIR_IDX(addr)]->remapped)

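/*
 * Aperture addresses are decoded in two levels: bits 22 and up select the
 * page-directory slot (each slot covers 4 MB, i.e. 1024 4 KB pages), and
 * bits 12-21 select the entry within that GATT page.  GET_PAGE_DIR_IDX is
 * taken relative to gart_bus_addr so that index 0 corresponds to the start
 * of the aperture; e.g. an address 5 MB into the aperture decodes to
 * directory index 1 and GATT offset 256.
 */
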
static int amd_create_gatt_table(struct agp_bridge_data *bridge)
{
	struct aper_size_info_lvl2 *value;
	struct amd_page_map page_dir;
	unsigned long addr;
	int retval;
	u32 temp;
	int i;

	value = A_SIZE_LVL2(agp_bridge->current_size);
	retval = amd_create_page_map(&page_dir);
	if (retval != 0)
		return retval;

	retval = amd_create_gatt_pages(value->num_entries / 1024);
	if (retval != 0) {
		amd_free_page_map(&page_dir);
		return retval;
	}

	agp_bridge->gatt_table_real = (u32 *)page_dir.real;
	agp_bridge->gatt_table = (u32 __iomem *)page_dir.remapped;
	agp_bridge->gatt_bus_addr = virt_to_gart(page_dir.real);

	/* Get the address for the gart region.
	 * This is a bus address even on the alpha, because it's used
	 * to program the agp master, not the cpu.
	 */
	pci_read_config_dword(agp_bridge->dev, AGP_APBASE, &temp);
	addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
	agp_bridge->gart_bus_addr = addr;

	/* Calculate the agp offset */
	for (i = 0; i < value->num_entries / 1024; i++, addr += 0x00400000) {
		writel(virt_to_gart(amd_irongate_private.gatt_pages[i]->real) | 1,
			page_dir.remapped+GET_PAGE_DIR_OFF(addr));
		readl(page_dir.remapped+GET_PAGE_DIR_OFF(addr));	/* PCI Posting. */
	}

	return 0;
}

static int amd_free_gatt_table(struct agp_bridge_data *bridge)
{
	struct amd_page_map page_dir;

	page_dir.real = (unsigned long *)agp_bridge->gatt_table_real;
	page_dir.remapped = (unsigned long __iomem *)agp_bridge->gatt_table;

	amd_free_gatt_pages();
	amd_free_page_map(&page_dir);
	return 0;
}

static int amd_irongate_fetch_size(void)
{
	int i;
	u32 temp;
	struct aper_size_info_lvl2 *values;

	pci_read_config_dword(agp_bridge->dev, AMD_APSIZE, &temp);
	temp = (temp & 0x0000000e);
	values = A_SIZE_LVL2(agp_bridge->driver->aperture_sizes);
	for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) {
		if (temp == values[i].size_value) {
			agp_bridge->previous_size =
			    agp_bridge->current_size = (void *) (values + i);
			agp_bridge->aperture_size_idx = i;
			return values[i].size;
		}
	}

	return 0;
}

static int amd_irongate_configure(void)
{
	struct aper_size_info_lvl2 *current_size;
	u32 temp;
	u16 enable_reg;

	current_size = A_SIZE_LVL2(agp_bridge->current_size);

	/* Get the memory mapped registers */
	pci_read_config_dword(agp_bridge->dev, AMD_MMBASE, &temp);
	temp = (temp & PCI_BASE_ADDRESS_MEM_MASK);
	amd_irongate_private.registers = (volatile u8 __iomem *) ioremap(temp, 4096);

	/* Write out the address of the gatt table */
	writel(agp_bridge->gatt_bus_addr, amd_irongate_private.registers+AMD_ATTBASE);
	readl(amd_irongate_private.registers+AMD_ATTBASE);	/* PCI Posting. */

	/* Write the Sync register */
	pci_write_config_byte(agp_bridge->dev, AMD_MODECNTL, 0x80);

	/* Set indexing mode */
	pci_write_config_byte(agp_bridge->dev, AMD_MODECNTL2, 0x00);

	/* Write the enable register */
	enable_reg = readw(amd_irongate_private.registers+AMD_GARTENABLE);
	enable_reg = (enable_reg | 0x0004);
	writew(enable_reg, amd_irongate_private.registers+AMD_GARTENABLE);
	readw(amd_irongate_private.registers+AMD_GARTENABLE);	/* PCI Posting. */

	/* Write out the size register */
	pci_read_config_dword(agp_bridge->dev, AMD_APSIZE, &temp);
	temp = (((temp & ~(0x0000000e)) | current_size->size_value) | 1);
	pci_write_config_dword(agp_bridge->dev, AMD_APSIZE, temp);

	/* Flush the tlb */
	writel(1, amd_irongate_private.registers+AMD_TLBFLUSH);
	readl(amd_irongate_private.registers+AMD_TLBFLUSH);	/* PCI Posting. */
	return 0;
}

static void amd_irongate_cleanup(void)
{
	struct aper_size_info_lvl2 *previous_size;
	u32 temp;
	u16 enable_reg;

	previous_size = A_SIZE_LVL2(agp_bridge->previous_size);

	enable_reg = readw(amd_irongate_private.registers+AMD_GARTENABLE);
	enable_reg = (enable_reg & ~(0x0004));
	writew(enable_reg, amd_irongate_private.registers+AMD_GARTENABLE);
	readw(amd_irongate_private.registers+AMD_GARTENABLE);	/* PCI Posting. */

	/* Write back the previous size and disable gart translation */
	pci_read_config_dword(agp_bridge->dev, AMD_APSIZE, &temp);
	temp = ((temp & ~(0x0000000f)) | previous_size->size_value);
	pci_write_config_dword(agp_bridge->dev, AMD_APSIZE, temp);
	iounmap((void __iomem *) amd_irongate_private.registers);
}

/*
 * This routine could be implemented by taking the addresses written to the
 * GATT and flushing them individually.  However, it currently just flushes
 * the whole table, which is probably more efficient since agp_memory blocks
 * can contain a large number of entries.
 */
static void amd_irongate_tlbflush(struct agp_memory *temp)
{
	writel(1, amd_irongate_private.registers+AMD_TLBFLUSH);
	readl(amd_irongate_private.registers+AMD_TLBFLUSH);	/* PCI Posting. */
}

static int amd_insert_memory(struct agp_memory *mem, off_t pg_start, int type)
{
	int i, j, num_entries;
	unsigned long __iomem *cur_gatt;
	unsigned long addr;

	num_entries = A_SIZE_LVL2(agp_bridge->current_size)->num_entries;

	if (type != 0 || mem->type != 0)
		return -EINVAL;

	if ((pg_start + mem->page_count) > num_entries)
		return -EINVAL;

	/* Refuse to bind over entries that are already in use. */
	j = pg_start;
	while (j < (pg_start + mem->page_count)) {
		addr = (j * PAGE_SIZE) + agp_bridge->gart_bus_addr;
		cur_gatt = GET_GATT(addr);
		if (!PGE_EMPTY(agp_bridge, readl(cur_gatt+GET_GATT_OFF(addr))))
			return -EBUSY;
		j++;
	}

	if (mem->is_flushed == FALSE) {
		global_cache_flush();
		mem->is_flushed = TRUE;
	}

	for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
		addr = (j * PAGE_SIZE) + agp_bridge->gart_bus_addr;
		cur_gatt = GET_GATT(addr);
		writel(agp_generic_mask_memory(agp_bridge,
			mem->memory[i], mem->type), cur_gatt+GET_GATT_OFF(addr));
		readl(cur_gatt+GET_GATT_OFF(addr));	/* PCI Posting. */
	}
	amd_irongate_tlbflush(mem);
	return 0;
}

static int amd_remove_memory(struct agp_memory *mem, off_t pg_start, int type)
{
	int i;
	unsigned long __iomem *cur_gatt;
	unsigned long addr;

	if (type != 0 || mem->type != 0)
		return -EINVAL;

	for (i = pg_start; i < (mem->page_count + pg_start); i++) {
		addr = (i * PAGE_SIZE) + agp_bridge->gart_bus_addr;
		cur_gatt = GET_GATT(addr);
		writel(agp_bridge->scratch_page, cur_gatt+GET_GATT_OFF(addr));
		readl(cur_gatt+GET_GATT_OFF(addr));	/* PCI Posting. */
	}

	amd_irongate_tlbflush(mem);
	return 0;
}

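/* Supported apertures: {size in MB, number of GATT entries, APSIZE value}. */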
static struct aper_size_info_lvl2 amd_irongate_sizes[7] =
{
	{2048, 524288, 0x0000000c},
	{1024, 262144, 0x0000000a},
	{512, 131072, 0x00000008},
	{256, 65536, 0x00000006},
	{128, 32768, 0x00000004},
	{64, 16384, 0x00000002},
	{32, 8192, 0x00000000}
};

static struct gatt_mask amd_irongate_masks[] =
{
	{.mask = 1, .type = 0}
};

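/*
 * Chipset-specific hooks for the generic agpgart backend; anything not
 * Irongate-specific falls through to the agp_generic_* helpers.
 */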
static struct agp_bridge_driver amd_irongate_driver = {
	.owner			= THIS_MODULE,
	.aperture_sizes		= amd_irongate_sizes,
	.size_type		= LVL2_APER_SIZE,
	.num_aperture_sizes	= 7,
	.configure		= amd_irongate_configure,
	.fetch_size		= amd_irongate_fetch_size,
	.cleanup		= amd_irongate_cleanup,
	.tlb_flush		= amd_irongate_tlbflush,
	.mask_memory		= agp_generic_mask_memory,
	.masks			= amd_irongate_masks,
	.agp_enable		= agp_generic_enable,
	.cache_flush		= global_cache_flush,
	.create_gatt_table	= amd_create_gatt_table,
	.free_gatt_table	= amd_free_gatt_table,
	.insert_memory		= amd_insert_memory,
	.remove_memory		= amd_remove_memory,
	.alloc_by_type		= agp_generic_alloc_by_type,
	.free_by_type		= agp_generic_free_by_type,
	.agp_alloc_page		= agp_generic_alloc_page,
	.agp_destroy_page	= agp_generic_destroy_page,
};

static struct agp_device_ids amd_agp_device_ids[] __devinitdata =
{
	{
		.device_id	= PCI_DEVICE_ID_AMD_FE_GATE_7006,
		.chipset_name	= "Irongate",
	},
	{
		.device_id	= PCI_DEVICE_ID_AMD_FE_GATE_700E,
		.chipset_name	= "761",
	},
	{
		.device_id	= PCI_DEVICE_ID_AMD_FE_GATE_700C,
		.chipset_name	= "760MP",
	},
	{ }, /* dummy final entry, always present */
};

static int __devinit agp_amdk7_probe(struct pci_dev *pdev,
				     const struct pci_device_id *ent)
{
	struct agp_bridge_data *bridge;
	u8 cap_ptr;
	int j;

	cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP);
	if (!cap_ptr)
		return -ENODEV;

	j = ent - agp_amdk7_pci_table;
	printk(KERN_INFO PFX "Detected AMD %s chipset\n",
	       amd_agp_device_ids[j].chipset_name);

	bridge = agp_alloc_bridge();
	if (!bridge)
		return -ENOMEM;

	bridge->driver = &amd_irongate_driver;
	bridge->dev_private_data = &amd_irongate_private;
	bridge->dev = pdev;
	bridge->capndx = cap_ptr;

	/* 751 Errata (22564_B-1.PDF)
	   erratum 20: strobe glitch with Nvidia NV10 GeForce cards.
	   system controller may experience noise due to strong drive strengths
	 */
	if (agp_bridge->dev->device == PCI_DEVICE_ID_AMD_FE_GATE_7006) {
		struct pci_dev *gfxcard = NULL;

		/* Walk the AGP-capable VGA devices looking for an NVidia card. */
		cap_ptr = 0;
		while (!cap_ptr) {
			gfxcard = pci_get_class(PCI_CLASS_DISPLAY_VGA<<8, gfxcard);
			if (!gfxcard) {
				printk (KERN_INFO PFX "Couldn't find an AGP VGA controller.\n");
				return -ENODEV;
			}
			cap_ptr = pci_find_capability(gfxcard, PCI_CAP_ID_AGP);
			if (!cap_ptr)
				pci_dev_put(gfxcard);
		}

		/* With so many variants of NVidia cards, it's simpler just
		   to blacklist them all, and then whitelist them as needed
		   (if necessary at all). */
		if (gfxcard->vendor == PCI_VENDOR_ID_NVIDIA) {
			agp_bridge->flags |= AGP_ERRATA_1X;
			printk (KERN_INFO PFX "AMD 751 chipset with NVidia GeForce detected. Forcing to 1X due to errata.\n");
		}
		pci_dev_put(gfxcard);
	}

	/* 761 Errata (23613_F.pdf)
	 * Revisions B0/B1 were a disaster.
	 * erratum 44: SYSCLK/AGPCLK skew causes 2X failures -- Force mode to 1X.
	 * erratum 45: Timing problem prevents fast writes -- Disable fast writes.
	 * erratum 46: Setup violation on AGP SBA pins -- Disable side band addressing.
	 * With this lot disabled, we should prevent lockups. */
	if (agp_bridge->dev->device == PCI_DEVICE_ID_AMD_FE_GATE_700E) {
		u8 revision;

		pci_read_config_byte(pdev, PCI_REVISION_ID, &revision);
		if (revision == 0x10 || revision == 0x11) {
			agp_bridge->flags = AGP_ERRATA_FASTWRITES;
			agp_bridge->flags |= AGP_ERRATA_SBA;
			agp_bridge->flags |= AGP_ERRATA_1X;
			printk (KERN_INFO PFX "AMD 761 chipset with errata detected - disabling AGP fast writes & SBA and forcing to 1X.\n");
		}
	}

	/* Fill in the mode register */
	pci_read_config_dword(pdev,
			bridge->capndx+PCI_AGP_STATUS,
			&bridge->mode);

	pci_set_drvdata(pdev, bridge);
	return agp_add_bridge(bridge);
}

static void __devexit agp_amdk7_remove(struct pci_dev *pdev)
{
	struct agp_bridge_data *bridge = pci_get_drvdata(pdev);

	agp_remove_bridge(bridge);
	agp_put_bridge(bridge);
}

/* must be the same order as name table above */
static struct pci_device_id agp_amdk7_pci_table[] = {
	{
	.class		= (PCI_CLASS_BRIDGE_HOST << 8),
	.class_mask	= ~0,
	.vendor		= PCI_VENDOR_ID_AMD,
	.device		= PCI_DEVICE_ID_AMD_FE_GATE_7006,
	.subvendor	= PCI_ANY_ID,
	.subdevice	= PCI_ANY_ID,
	},
	{
	.class		= (PCI_CLASS_BRIDGE_HOST << 8),
	.class_mask	= ~0,
	.vendor		= PCI_VENDOR_ID_AMD,
	.device		= PCI_DEVICE_ID_AMD_FE_GATE_700E,
	.subvendor	= PCI_ANY_ID,
	.subdevice	= PCI_ANY_ID,
	},
	{
	.class		= (PCI_CLASS_BRIDGE_HOST << 8),
	.class_mask	= ~0,
	.vendor		= PCI_VENDOR_ID_AMD,
	.device		= PCI_DEVICE_ID_AMD_FE_GATE_700C,
	.subvendor	= PCI_ANY_ID,
	.subdevice	= PCI_ANY_ID,
	},
	{ }
};

MODULE_DEVICE_TABLE(pci, agp_amdk7_pci_table);

static struct pci_driver agp_amdk7_pci_driver = {
	.name		= "agpgart-amdk7",
	.id_table	= agp_amdk7_pci_table,
	.probe		= agp_amdk7_probe,
	.remove		= agp_amdk7_remove,
};

static int __init agp_amdk7_init(void)
{
	return pci_register_driver(&agp_amdk7_pci_driver);
}

static void __exit agp_amdk7_cleanup(void)
{
	pci_unregister_driver(&agp_amdk7_pci_driver);
}

module_init(agp_amdk7_init);
module_exit(agp_amdk7_cleanup);

MODULE_LICENSE("GPL and additional rights");