/*
 * For documentation on the i460 AGP interface, see Chapter 7 (AGP Subsystem) of
 * the "Intel 460GTX Chipset Software Developer's Manual":
 * http://developer.intel.com/design/itanium/downloads/24870401s.htm
 *
 * 460GX support by Chris Ahna <christopher.j.ahna@intel.com>
 * Clean up & simplification by David Mosberger-Tang <davidm@hpl.hp.com>
 */
10 #include <linux/module.h>
11 #include <linux/pci.h>
12 #include <linux/init.h>
13 #include <linux/string.h>
14 #include <linux/slab.h>
15 #include <linux/agp_backend.h>
16 #include <linux/log2.h>
/* i460 configuration-space register offsets and GATT entry control bits. */
#define INTEL_I460_BAPBASE		0x98
#define INTEL_I460_GXBCTL		0xa0
#define INTEL_I460_AGPSIZ		0xa2
#define INTEL_I460_ATTBASE		0xfe200000
#define INTEL_I460_GATT_VALID		(1UL << 24)
#define INTEL_I460_GATT_COHERENT	(1UL << 25)

/*
 * The i460 can operate with large (4MB) pages, but there is no sane way to support this
 * within the current kernel/DRM environment, so we disable the relevant code for now.
 * See also comments in ia64_alloc_page()...
 */
#define I460_LARGE_IO_PAGES		0

#if I460_LARGE_IO_PAGES
# define I460_IO_PAGE_SHIFT		i460.io_page_shift
#else
# define I460_IO_PAGE_SHIFT		12
#endif

#define I460_IOPAGES_PER_KPAGE		(PAGE_SIZE >> I460_IO_PAGE_SHIFT)
#define I460_KPAGES_PER_IOPAGE		(1 << (I460_IO_PAGE_SHIFT - PAGE_SHIFT))
#define I460_SRAM_IO_DISABLE		(1 << 4)
#define I460_BAPBASE_ENABLE		(1 << 3)
#define I460_AGPSIZ_MASK		0x7
#define I460_4M_PS			(1 << 1)

/* Control bits for Out-Of-GART coherency and Burst Write Combining */
#define I460_GXBCTL_OOG		(1UL << 0)
#define I460_GXBCTL_BWC		(1UL << 2)

/*
 * gatt_table entries are 32-bits wide on the i460; the generic code ought to declare the
 * gatt_table and gatt_table_real pointers a "void *"...
 */
#define RD_GATT(index)		readl((u32 *) i460.gatt + (index))
#define WR_GATT(index, val)	writel((val), (u32 *) i460.gatt + (index))
/*
 * The 460 spec says we have to read the last location written to make sure that all
 * writes have taken effect
 */
#define WR_FLUSH_GATT(index)	RD_GATT(index)
63 static unsigned long i460_mask_memory (struct agp_bridge_data
*bridge
,
64 dma_addr_t addr
, int type
);
67 void *gatt
; /* ioremap'd GATT area */
69 /* i460 supports multiple GART page sizes, so GART pageshift is dynamic: */
72 /* BIOS configures chipset to one of 2 possible apbase values: */
75 /* structure for tracking partial use of 4MB GART pages: */
77 unsigned long *alloced_map
; /* bitmap of kernel-pages in use */
78 int refcount
; /* number of kernel pages using the large page */
79 u64 paddr
; /* physical address of large page */
80 struct page
*page
; /* page pointer */
84 static const struct aper_size_info_8 i460_sizes
[3] =
87 * The 32GB aperture is only available with a 4M GART page size. Due to the
88 * dynamic GART page size, we can't figure out page_order or num_entries until
96 static struct gatt_mask i460_masks
[] =
99 .mask
= INTEL_I460_GATT_VALID
| INTEL_I460_GATT_COHERENT
,
104 static int i460_fetch_size (void)
108 struct aper_size_info_8
*values
;
110 /* Determine the GART page size */
111 pci_read_config_byte(agp_bridge
->dev
, INTEL_I460_GXBCTL
, &temp
);
112 i460
.io_page_shift
= (temp
& I460_4M_PS
) ? 22 : 12;
113 pr_debug("i460_fetch_size: io_page_shift=%d\n", i460
.io_page_shift
);
115 if (i460
.io_page_shift
!= I460_IO_PAGE_SHIFT
) {
117 "I/O (GART) page-size %luKB doesn't match expected "
119 1UL << (i460
.io_page_shift
- 10),
120 1UL << (I460_IO_PAGE_SHIFT
));
124 values
= A_SIZE_8(agp_bridge
->driver
->aperture_sizes
);
126 pci_read_config_byte(agp_bridge
->dev
, INTEL_I460_AGPSIZ
, &temp
);
128 /* Exit now if the IO drivers for the GART SRAMS are turned off */
129 if (temp
& I460_SRAM_IO_DISABLE
) {
130 printk(KERN_ERR PFX
"GART SRAMS disabled on 460GX chipset\n");
131 printk(KERN_ERR PFX
"AGPGART operation not possible\n");
135 /* Make sure we don't try to create an 2 ^ 23 entry GATT */
136 if ((i460
.io_page_shift
== 0) && ((temp
& I460_AGPSIZ_MASK
) == 4)) {
137 printk(KERN_ERR PFX
"We can't have a 32GB aperture with 4KB GART pages\n");
141 /* Determine the proper APBASE register */
142 if (temp
& I460_BAPBASE_ENABLE
)
143 i460
.dynamic_apbase
= INTEL_I460_BAPBASE
;
145 i460
.dynamic_apbase
= AGP_APBASE
;
147 for (i
= 0; i
< agp_bridge
->driver
->num_aperture_sizes
; i
++) {
149 * Dynamically calculate the proper num_entries and page_order values for
150 * the define aperture sizes. Take care not to shift off the end of
153 values
[i
].num_entries
= (values
[i
].size
<< 8) >> (I460_IO_PAGE_SHIFT
- 12);
154 values
[i
].page_order
= ilog2((sizeof(u32
)*values
[i
].num_entries
) >> PAGE_SHIFT
);
157 for (i
= 0; i
< agp_bridge
->driver
->num_aperture_sizes
; i
++) {
158 /* Neglect control bits when matching up size_value */
159 if ((temp
& I460_AGPSIZ_MASK
) == values
[i
].size_value
) {
160 agp_bridge
->previous_size
= agp_bridge
->current_size
= (void *) (values
+ i
);
161 agp_bridge
->aperture_size_idx
= i
;
162 return values
[i
].size
;
/* There isn't anything to do here since 460 has no GART TLB. */
static void i460_tlb_flush (struct agp_memory *mem)
{
}
176 * This utility function is needed to prevent corruption of the control bits
177 * which are stored along with the aperture size in 460's AGPSIZ register
179 static void i460_write_agpsiz (u8 size_value
)
183 pci_read_config_byte(agp_bridge
->dev
, INTEL_I460_AGPSIZ
, &temp
);
184 pci_write_config_byte(agp_bridge
->dev
, INTEL_I460_AGPSIZ
,
185 ((temp
& ~I460_AGPSIZ_MASK
) | size_value
));
188 static void i460_cleanup (void)
190 struct aper_size_info_8
*previous_size
;
192 previous_size
= A_SIZE_8(agp_bridge
->previous_size
);
193 i460_write_agpsiz(previous_size
->size_value
);
195 if (I460_IO_PAGE_SHIFT
> PAGE_SHIFT
)
199 static int i460_configure (void)
207 struct aper_size_info_8
*current_size
;
211 current_size
= A_SIZE_8(agp_bridge
->current_size
);
212 i460_write_agpsiz(current_size
->size_value
);
215 * Do the necessary rigmarole to read all eight bytes of APBASE.
216 * This has to be done since the AGP aperture can be above 4GB on
219 pci_read_config_dword(agp_bridge
->dev
, i460
.dynamic_apbase
, &(temp
.small
[0]));
220 pci_read_config_dword(agp_bridge
->dev
, i460
.dynamic_apbase
+ 4, &(temp
.small
[1]));
222 /* Clear BAR control bits */
223 agp_bridge
->gart_bus_addr
= temp
.large
& ~((1UL << 3) - 1);
225 pci_read_config_byte(agp_bridge
->dev
, INTEL_I460_GXBCTL
, &scratch
);
226 pci_write_config_byte(agp_bridge
->dev
, INTEL_I460_GXBCTL
,
227 (scratch
& 0x02) | I460_GXBCTL_OOG
| I460_GXBCTL_BWC
);
230 * Initialize partial allocation trackers if a GART page is bigger than a kernel
233 if (I460_IO_PAGE_SHIFT
> PAGE_SHIFT
) {
234 size
= current_size
->num_entries
* sizeof(i460
.lp_desc
[0]);
235 i460
.lp_desc
= kzalloc(size
, GFP_KERNEL
);
242 static int i460_create_gatt_table (struct agp_bridge_data
*bridge
)
244 int page_order
, num_entries
, i
;
248 * Load up the fixed address of the GART SRAMS which hold our GATT table.
250 temp
= agp_bridge
->current_size
;
251 page_order
= A_SIZE_8(temp
)->page_order
;
252 num_entries
= A_SIZE_8(temp
)->num_entries
;
254 i460
.gatt
= ioremap(INTEL_I460_ATTBASE
, PAGE_SIZE
<< page_order
);
256 printk(KERN_ERR PFX
"ioremap failed\n");
260 /* These are no good, the should be removed from the agp_bridge strucure... */
261 agp_bridge
->gatt_table_real
= NULL
;
262 agp_bridge
->gatt_table
= NULL
;
263 agp_bridge
->gatt_bus_addr
= 0;
265 for (i
= 0; i
< num_entries
; ++i
)
267 WR_FLUSH_GATT(i
- 1);
271 static int i460_free_gatt_table (struct agp_bridge_data
*bridge
)
276 temp
= agp_bridge
->current_size
;
278 num_entries
= A_SIZE_8(temp
)->num_entries
;
280 for (i
= 0; i
< num_entries
; ++i
)
282 WR_FLUSH_GATT(num_entries
- 1);
289 * The following functions are called when the I/O (GART) page size is smaller than
293 static int i460_insert_memory_small_io_page (struct agp_memory
*mem
,
294 off_t pg_start
, int type
)
296 unsigned long paddr
, io_pg_start
, io_page_size
;
297 int i
, j
, k
, num_entries
;
300 pr_debug("i460_insert_memory_small_io_page(mem=%p, pg_start=%ld, type=%d, paddr0=0x%lx)\n",
301 mem
, pg_start
, type
, page_to_phys(mem
->pages
[0]));
303 if (type
>= AGP_USER_TYPES
|| mem
->type
>= AGP_USER_TYPES
)
306 io_pg_start
= I460_IOPAGES_PER_KPAGE
* pg_start
;
308 temp
= agp_bridge
->current_size
;
309 num_entries
= A_SIZE_8(temp
)->num_entries
;
311 if ((io_pg_start
+ I460_IOPAGES_PER_KPAGE
* mem
->page_count
) > num_entries
) {
312 printk(KERN_ERR PFX
"Looks like we're out of AGP memory\n");
317 while (j
< (io_pg_start
+ I460_IOPAGES_PER_KPAGE
* mem
->page_count
)) {
318 if (!PGE_EMPTY(agp_bridge
, RD_GATT(j
))) {
319 pr_debug("i460_insert_memory_small_io_page: GATT[%d]=0x%x is busy\n",
326 io_page_size
= 1UL << I460_IO_PAGE_SHIFT
;
327 for (i
= 0, j
= io_pg_start
; i
< mem
->page_count
; i
++) {
328 paddr
= page_to_phys(mem
->pages
[i
]);
329 for (k
= 0; k
< I460_IOPAGES_PER_KPAGE
; k
++, j
++, paddr
+= io_page_size
)
330 WR_GATT(j
, i460_mask_memory(agp_bridge
, paddr
, mem
->type
));
332 WR_FLUSH_GATT(j
- 1);
336 static int i460_remove_memory_small_io_page(struct agp_memory
*mem
,
337 off_t pg_start
, int type
)
341 pr_debug("i460_remove_memory_small_io_page(mem=%p, pg_start=%ld, type=%d)\n",
342 mem
, pg_start
, type
);
344 pg_start
= I460_IOPAGES_PER_KPAGE
* pg_start
;
346 for (i
= pg_start
; i
< (pg_start
+ I460_IOPAGES_PER_KPAGE
* mem
->page_count
); i
++)
348 WR_FLUSH_GATT(i
- 1);
352 #if I460_LARGE_IO_PAGES
355 * These functions are called when the I/O (GART) page size exceeds PAGE_SIZE.
357 * This situation is interesting since AGP memory allocations that are smaller than a
358 * single GART page are possible. The i460.lp_desc array tracks partial allocation of the
359 * large GART pages to work around this issue.
361 * i460.lp_desc[pg_num].refcount tracks the number of kernel pages in use within GART page
362 * pg_num. i460.lp_desc[pg_num].paddr is the physical address of the large page and
363 * i460.lp_desc[pg_num].alloced_map is a bitmap of kernel pages that are in use (allocated).
366 static int i460_alloc_large_page (struct lp_desc
*lp
)
368 unsigned long order
= I460_IO_PAGE_SHIFT
- PAGE_SHIFT
;
371 lp
->page
= alloc_pages(GFP_KERNEL
, order
);
373 printk(KERN_ERR PFX
"Couldn't alloc 4M GART page...\n");
377 map_size
= ((I460_KPAGES_PER_IOPAGE
+ BITS_PER_LONG
- 1) & -BITS_PER_LONG
)/8;
378 lp
->alloced_map
= kzalloc(map_size
, GFP_KERNEL
);
379 if (!lp
->alloced_map
) {
380 __free_pages(lp
->page
, order
);
381 printk(KERN_ERR PFX
"Out of memory, we're in trouble...\n");
385 lp
->paddr
= page_to_phys(lp
->page
);
387 atomic_add(I460_KPAGES_PER_IOPAGE
, &agp_bridge
->current_memory_agp
);
391 static void i460_free_large_page (struct lp_desc
*lp
)
393 kfree(lp
->alloced_map
);
394 lp
->alloced_map
= NULL
;
396 __free_pages(lp
->page
, I460_IO_PAGE_SHIFT
- PAGE_SHIFT
);
397 atomic_sub(I460_KPAGES_PER_IOPAGE
, &agp_bridge
->current_memory_agp
);
400 static int i460_insert_memory_large_io_page (struct agp_memory
*mem
,
401 off_t pg_start
, int type
)
403 int i
, start_offset
, end_offset
, idx
, pg
, num_entries
;
404 struct lp_desc
*start
, *end
, *lp
;
407 if (type
>= AGP_USER_TYPES
|| mem
->type
>= AGP_USER_TYPES
)
410 temp
= agp_bridge
->current_size
;
411 num_entries
= A_SIZE_8(temp
)->num_entries
;
413 /* Figure out what pg_start means in terms of our large GART pages */
414 start
= &i460
.lp_desc
[pg_start
/ I460_KPAGES_PER_IOPAGE
];
415 end
= &i460
.lp_desc
[(pg_start
+ mem
->page_count
- 1) / I460_KPAGES_PER_IOPAGE
];
416 start_offset
= pg_start
% I460_KPAGES_PER_IOPAGE
;
417 end_offset
= (pg_start
+ mem
->page_count
- 1) % I460_KPAGES_PER_IOPAGE
;
419 if (end
> i460
.lp_desc
+ num_entries
) {
420 printk(KERN_ERR PFX
"Looks like we're out of AGP memory\n");
424 /* Check if the requested region of the aperture is free */
425 for (lp
= start
; lp
<= end
; ++lp
) {
426 if (!lp
->alloced_map
)
427 continue; /* OK, the entire large page is available... */
429 for (idx
= ((lp
== start
) ? start_offset
: 0);
430 idx
< ((lp
== end
) ? (end_offset
+ 1) : I460_KPAGES_PER_IOPAGE
);
433 if (test_bit(idx
, lp
->alloced_map
))
438 for (lp
= start
, i
= 0; lp
<= end
; ++lp
) {
439 if (!lp
->alloced_map
) {
440 /* Allocate new GART pages... */
441 if (i460_alloc_large_page(lp
) < 0)
443 pg
= lp
- i460
.lp_desc
;
444 WR_GATT(pg
, i460_mask_memory(agp_bridge
,
449 for (idx
= ((lp
== start
) ? start_offset
: 0);
450 idx
< ((lp
== end
) ? (end_offset
+ 1) : I460_KPAGES_PER_IOPAGE
);
453 mem
->pages
[i
] = lp
->page
;
454 __set_bit(idx
, lp
->alloced_map
);
461 static int i460_remove_memory_large_io_page (struct agp_memory
*mem
,
462 off_t pg_start
, int type
)
464 int i
, pg
, start_offset
, end_offset
, idx
, num_entries
;
465 struct lp_desc
*start
, *end
, *lp
;
468 temp
= agp_bridge
->current_size
;
469 num_entries
= A_SIZE_8(temp
)->num_entries
;
471 /* Figure out what pg_start means in terms of our large GART pages */
472 start
= &i460
.lp_desc
[pg_start
/ I460_KPAGES_PER_IOPAGE
];
473 end
= &i460
.lp_desc
[(pg_start
+ mem
->page_count
- 1) / I460_KPAGES_PER_IOPAGE
];
474 start_offset
= pg_start
% I460_KPAGES_PER_IOPAGE
;
475 end_offset
= (pg_start
+ mem
->page_count
- 1) % I460_KPAGES_PER_IOPAGE
;
477 for (i
= 0, lp
= start
; lp
<= end
; ++lp
) {
478 for (idx
= ((lp
== start
) ? start_offset
: 0);
479 idx
< ((lp
== end
) ? (end_offset
+ 1) : I460_KPAGES_PER_IOPAGE
);
482 mem
->pages
[i
] = NULL
;
483 __clear_bit(idx
, lp
->alloced_map
);
487 /* Free GART pages if they are unused */
488 if (lp
->refcount
== 0) {
489 pg
= lp
- i460
.lp_desc
;
492 i460_free_large_page(lp
);
498 /* Wrapper routines to call the approriate {small_io_page,large_io_page} function */
500 static int i460_insert_memory (struct agp_memory
*mem
,
501 off_t pg_start
, int type
)
503 if (I460_IO_PAGE_SHIFT
<= PAGE_SHIFT
)
504 return i460_insert_memory_small_io_page(mem
, pg_start
, type
);
506 return i460_insert_memory_large_io_page(mem
, pg_start
, type
);
509 static int i460_remove_memory (struct agp_memory
*mem
,
510 off_t pg_start
, int type
)
512 if (I460_IO_PAGE_SHIFT
<= PAGE_SHIFT
)
513 return i460_remove_memory_small_io_page(mem
, pg_start
, type
);
515 return i460_remove_memory_large_io_page(mem
, pg_start
, type
);
519 * If the I/O (GART) page size is bigger than the kernel page size, we don't want to
520 * allocate memory until we know where it is to be bound in the aperture (a
521 * multi-kernel-page alloc might fit inside of an already allocated GART page).
523 * Let's just hope nobody counts on the allocated AGP memory being there before bind time
524 * (I don't think current drivers do)...
526 static struct page
*i460_alloc_page (struct agp_bridge_data
*bridge
)
530 if (I460_IO_PAGE_SHIFT
<= PAGE_SHIFT
) {
531 page
= agp_generic_alloc_page(agp_bridge
);
533 /* Returning NULL would cause problems */
534 /* AK: really dubious code. */
539 static void i460_destroy_page (struct page
*page
, int flags
)
541 if (I460_IO_PAGE_SHIFT
<= PAGE_SHIFT
) {
542 agp_generic_destroy_page(page
, flags
);
546 #endif /* I460_LARGE_IO_PAGES */
548 static unsigned long i460_mask_memory (struct agp_bridge_data
*bridge
,
549 dma_addr_t addr
, int type
)
551 /* Make sure the returned address is a valid GATT entry */
552 return bridge
->driver
->masks
[0].mask
553 | (((addr
& ~((1 << I460_IO_PAGE_SHIFT
) - 1)) & 0xfffff000) >> 12);
556 const struct agp_bridge_driver intel_i460_driver
= {
557 .owner
= THIS_MODULE
,
558 .aperture_sizes
= i460_sizes
,
559 .size_type
= U8_APER_SIZE
,
560 .num_aperture_sizes
= 3,
561 .configure
= i460_configure
,
562 .fetch_size
= i460_fetch_size
,
563 .cleanup
= i460_cleanup
,
564 .tlb_flush
= i460_tlb_flush
,
565 .mask_memory
= i460_mask_memory
,
567 .agp_enable
= agp_generic_enable
,
568 .cache_flush
= global_cache_flush
,
569 .create_gatt_table
= i460_create_gatt_table
,
570 .free_gatt_table
= i460_free_gatt_table
,
571 #if I460_LARGE_IO_PAGES
572 .insert_memory
= i460_insert_memory
,
573 .remove_memory
= i460_remove_memory
,
574 .agp_alloc_page
= i460_alloc_page
,
575 .agp_destroy_page
= i460_destroy_page
,
577 .insert_memory
= i460_insert_memory_small_io_page
,
578 .remove_memory
= i460_remove_memory_small_io_page
,
579 .agp_alloc_page
= agp_generic_alloc_page
,
580 .agp_alloc_pages
= agp_generic_alloc_pages
,
581 .agp_destroy_page
= agp_generic_destroy_page
,
582 .agp_destroy_pages
= agp_generic_destroy_pages
,
584 .alloc_by_type
= agp_generic_alloc_by_type
,
585 .free_by_type
= agp_generic_free_by_type
,
586 .agp_type_to_mask_type
= agp_generic_type_to_mask_type
,
587 .cant_use_aperture
= true,
590 static int __devinit
agp_intel_i460_probe(struct pci_dev
*pdev
,
591 const struct pci_device_id
*ent
)
593 struct agp_bridge_data
*bridge
;
596 cap_ptr
= pci_find_capability(pdev
, PCI_CAP_ID_AGP
);
600 bridge
= agp_alloc_bridge();
604 bridge
->driver
= &intel_i460_driver
;
606 bridge
->capndx
= cap_ptr
;
608 printk(KERN_INFO PFX
"Detected Intel 460GX chipset\n");
610 pci_set_drvdata(pdev
, bridge
);
611 return agp_add_bridge(bridge
);
614 static void __devexit
agp_intel_i460_remove(struct pci_dev
*pdev
)
616 struct agp_bridge_data
*bridge
= pci_get_drvdata(pdev
);
618 agp_remove_bridge(bridge
);
619 agp_put_bridge(bridge
);
622 static struct pci_device_id agp_intel_i460_pci_table
[] = {
624 .class = (PCI_CLASS_BRIDGE_HOST
<< 8),
626 .vendor
= PCI_VENDOR_ID_INTEL
,
627 .device
= PCI_DEVICE_ID_INTEL_84460GX
,
628 .subvendor
= PCI_ANY_ID
,
629 .subdevice
= PCI_ANY_ID
,
634 MODULE_DEVICE_TABLE(pci
, agp_intel_i460_pci_table
);
636 static struct pci_driver agp_intel_i460_pci_driver
= {
637 .name
= "agpgart-intel-i460",
638 .id_table
= agp_intel_i460_pci_table
,
639 .probe
= agp_intel_i460_probe
,
640 .remove
= __devexit_p(agp_intel_i460_remove
),
643 static int __init
agp_intel_i460_init(void)
647 return pci_register_driver(&agp_intel_i460_pci_driver
);
650 static void __exit
agp_intel_i460_cleanup(void)
652 pci_unregister_driver(&agp_intel_i460_pci_driver
);
655 module_init(agp_intel_i460_init
);
656 module_exit(agp_intel_i460_cleanup
);
658 MODULE_AUTHOR("Chris Ahna <Christopher.J.Ahna@intel.com>");
659 MODULE_LICENSE("GPL and additional rights");