2 * Intel GTT (Graphics Translation Table) routines
4 * Caveat: This driver implements the linux agp interface, but this is far from
5 * an agp driver! GTT support ended up here for purely historical reasons: The
6 * old userspace intel graphics drivers needed an interface to map memory into
7 * the GTT. And the drm provides a default interface for graphic devices sitting
8 * on an agp port. So it made sense to fake the GTT support as an agp port to
9 * avoid having to create a new api.
11 * With gem this does not make much sense anymore, just needlessly complicates
12 * the code. But as long as the old graphics stack is still supported, it's stuck
15 * /fairy-tale-mode off
19 * If we have Intel graphics, we're not going to have anything other than
20 * an Intel IOMMU. So make the correct use of the PCI DMA API contingent
21 * on the Intel IOMMU support (CONFIG_DMAR).
22 * Only newer chipsets need to bother with this, of course.
25 #define USE_PCI_DMA_API 1
28 /* Max amount of stolen space, anything above will be returned to Linux */
29 int intel_max_stolen
= 32 * 1024 * 1024;
30 EXPORT_SYMBOL(intel_max_stolen
);
32 static const struct aper_size_info_fixed intel_i810_sizes
[] =
35 /* The 32M mode still requires a 64k gatt */
39 #define AGP_DCACHE_MEMORY 1
40 #define AGP_PHYS_MEMORY 2
41 #define INTEL_AGP_CACHED_MEMORY 3
43 static struct gatt_mask intel_i810_masks
[] =
45 {.mask
= I810_PTE_VALID
, .type
= 0},
46 {.mask
= (I810_PTE_VALID
| I810_PTE_LOCAL
), .type
= AGP_DCACHE_MEMORY
},
47 {.mask
= I810_PTE_VALID
, .type
= 0},
48 {.mask
= I810_PTE_VALID
| I830_PTE_SYSTEM_CACHED
,
49 .type
= INTEL_AGP_CACHED_MEMORY
}
52 #define INTEL_AGP_UNCACHED_MEMORY 0
53 #define INTEL_AGP_CACHED_MEMORY_LLC 1
54 #define INTEL_AGP_CACHED_MEMORY_LLC_GFDT 2
55 #define INTEL_AGP_CACHED_MEMORY_LLC_MLC 3
56 #define INTEL_AGP_CACHED_MEMORY_LLC_MLC_GFDT 4
58 static struct gatt_mask intel_gen6_masks
[] =
60 {.mask
= I810_PTE_VALID
| GEN6_PTE_UNCACHED
,
61 .type
= INTEL_AGP_UNCACHED_MEMORY
},
62 {.mask
= I810_PTE_VALID
| GEN6_PTE_LLC
,
63 .type
= INTEL_AGP_CACHED_MEMORY_LLC
},
64 {.mask
= I810_PTE_VALID
| GEN6_PTE_LLC
| GEN6_PTE_GFDT
,
65 .type
= INTEL_AGP_CACHED_MEMORY_LLC_GFDT
},
66 {.mask
= I810_PTE_VALID
| GEN6_PTE_LLC_MLC
,
67 .type
= INTEL_AGP_CACHED_MEMORY_LLC_MLC
},
68 {.mask
= I810_PTE_VALID
| GEN6_PTE_LLC_MLC
| GEN6_PTE_GFDT
,
69 .type
= INTEL_AGP_CACHED_MEMORY_LLC_MLC_GFDT
},
72 static struct _intel_private
{
73 struct pci_dev
*pcidev
; /* device one */
74 u8 __iomem
*registers
;
75 u32 __iomem
*gtt
; /* I915G */
76 int num_dcache_entries
;
77 /* gtt_entries is the number of gtt entries that are already mapped
78 * to stolen memory. Stolen memory is larger than the memory mapped
79 * through gtt_entries, as it includes some reserved space for the BIOS
80 * popup and for the GTT.
82 int gtt_entries
; /* i830+ */
85 void __iomem
*i9xx_flush_page
;
86 void *i8xx_flush_page
;
88 struct page
*i8xx_page
;
89 struct resource ifp_resource
;
93 #ifdef USE_PCI_DMA_API
94 static int intel_agp_map_page(struct page
*page
, dma_addr_t
*ret
)
96 *ret
= pci_map_page(intel_private
.pcidev
, page
, 0,
97 PAGE_SIZE
, PCI_DMA_BIDIRECTIONAL
);
98 if (pci_dma_mapping_error(intel_private
.pcidev
, *ret
))
103 static void intel_agp_unmap_page(struct page
*page
, dma_addr_t dma
)
105 pci_unmap_page(intel_private
.pcidev
, dma
,
106 PAGE_SIZE
, PCI_DMA_BIDIRECTIONAL
);
109 static void intel_agp_free_sglist(struct agp_memory
*mem
)
113 st
.sgl
= mem
->sg_list
;
114 st
.orig_nents
= st
.nents
= mem
->page_count
;
122 static int intel_agp_map_memory(struct agp_memory
*mem
)
125 struct scatterlist
*sg
;
128 DBG("try mapping %lu pages\n", (unsigned long)mem
->page_count
);
130 if (sg_alloc_table(&st
, mem
->page_count
, GFP_KERNEL
))
133 mem
->sg_list
= sg
= st
.sgl
;
135 for (i
= 0 ; i
< mem
->page_count
; i
++, sg
= sg_next(sg
))
136 sg_set_page(sg
, mem
->pages
[i
], PAGE_SIZE
, 0);
138 mem
->num_sg
= pci_map_sg(intel_private
.pcidev
, mem
->sg_list
,
139 mem
->page_count
, PCI_DMA_BIDIRECTIONAL
);
140 if (unlikely(!mem
->num_sg
))
150 static void intel_agp_unmap_memory(struct agp_memory
*mem
)
152 DBG("try unmapping %lu pages\n", (unsigned long)mem
->page_count
);
154 pci_unmap_sg(intel_private
.pcidev
, mem
->sg_list
,
155 mem
->page_count
, PCI_DMA_BIDIRECTIONAL
);
156 intel_agp_free_sglist(mem
);
159 static void intel_agp_insert_sg_entries(struct agp_memory
*mem
,
160 off_t pg_start
, int mask_type
)
162 struct scatterlist
*sg
;
167 WARN_ON(!mem
->num_sg
);
169 if (mem
->num_sg
== mem
->page_count
) {
170 for_each_sg(mem
->sg_list
, sg
, mem
->page_count
, i
) {
171 writel(agp_bridge
->driver
->mask_memory(agp_bridge
,
172 sg_dma_address(sg
), mask_type
),
173 intel_private
.gtt
+j
);
177 /* sg may merge pages, but we have to separate
178 * per-page addr for GTT */
181 for_each_sg(mem
->sg_list
, sg
, mem
->num_sg
, i
) {
182 len
= sg_dma_len(sg
) / PAGE_SIZE
;
183 for (m
= 0; m
< len
; m
++) {
184 writel(agp_bridge
->driver
->mask_memory(agp_bridge
,
185 sg_dma_address(sg
) + m
* PAGE_SIZE
,
187 intel_private
.gtt
+j
);
192 readl(intel_private
.gtt
+j
-1);
197 static void intel_agp_insert_sg_entries(struct agp_memory
*mem
,
198 off_t pg_start
, int mask_type
)
202 for (i
= 0, j
= pg_start
; i
< mem
->page_count
; i
++, j
++) {
203 writel(agp_bridge
->driver
->mask_memory(agp_bridge
,
204 page_to_phys(mem
->pages
[i
]), mask_type
),
205 intel_private
.gtt
+j
);
208 readl(intel_private
.gtt
+j
-1);
213 static int intel_i810_fetch_size(void)
216 struct aper_size_info_fixed
*values
;
218 pci_read_config_dword(agp_bridge
->dev
, I810_SMRAM_MISCC
, &smram_miscc
);
219 values
= A_SIZE_FIX(agp_bridge
->driver
->aperture_sizes
);
221 if ((smram_miscc
& I810_GMS
) == I810_GMS_DISABLE
) {
222 dev_warn(&agp_bridge
->dev
->dev
, "i810 is disabled\n");
225 if ((smram_miscc
& I810_GFX_MEM_WIN_SIZE
) == I810_GFX_MEM_WIN_32M
) {
226 agp_bridge
->current_size
= (void *) (values
+ 1);
227 agp_bridge
->aperture_size_idx
= 1;
228 return values
[1].size
;
230 agp_bridge
->current_size
= (void *) (values
);
231 agp_bridge
->aperture_size_idx
= 0;
232 return values
[0].size
;
238 static int intel_i810_configure(void)
240 struct aper_size_info_fixed
*current_size
;
244 current_size
= A_SIZE_FIX(agp_bridge
->current_size
);
246 if (!intel_private
.registers
) {
247 pci_read_config_dword(intel_private
.pcidev
, I810_MMADDR
, &temp
);
250 intel_private
.registers
= ioremap(temp
, 128 * 4096);
251 if (!intel_private
.registers
) {
252 dev_err(&intel_private
.pcidev
->dev
,
253 "can't remap memory\n");
258 if ((readl(intel_private
.registers
+I810_DRAM_CTL
)
259 & I810_DRAM_ROW_0
) == I810_DRAM_ROW_0_SDRAM
) {
260 /* This will need to be dynamically assigned */
261 dev_info(&intel_private
.pcidev
->dev
,
262 "detected 4MB dedicated video ram\n");
263 intel_private
.num_dcache_entries
= 1024;
265 pci_read_config_dword(intel_private
.pcidev
, I810_GMADDR
, &temp
);
266 agp_bridge
->gart_bus_addr
= (temp
& PCI_BASE_ADDRESS_MEM_MASK
);
267 writel(agp_bridge
->gatt_bus_addr
| I810_PGETBL_ENABLED
, intel_private
.registers
+I810_PGETBL_CTL
);
268 readl(intel_private
.registers
+I810_PGETBL_CTL
); /* PCI Posting. */
270 if (agp_bridge
->driver
->needs_scratch_page
) {
271 for (i
= 0; i
< current_size
->num_entries
; i
++) {
272 writel(agp_bridge
->scratch_page
, intel_private
.registers
+I810_PTE_BASE
+(i
*4));
274 readl(intel_private
.registers
+I810_PTE_BASE
+((i
-1)*4)); /* PCI posting. */
276 global_cache_flush();
280 static void intel_i810_cleanup(void)
282 writel(0, intel_private
.registers
+I810_PGETBL_CTL
);
283 readl(intel_private
.registers
); /* PCI Posting. */
284 iounmap(intel_private
.registers
);
287 static void intel_i810_agp_enable(struct agp_bridge_data
*bridge
, u32 mode
)
292 /* Exists to support ARGB cursors */
293 static struct page
*i8xx_alloc_pages(void)
297 page
= alloc_pages(GFP_KERNEL
| GFP_DMA32
, 2);
301 if (set_pages_uc(page
, 4) < 0) {
302 set_pages_wb(page
, 4);
303 __free_pages(page
, 2);
307 atomic_inc(&agp_bridge
->current_memory_agp
);
311 static void i8xx_destroy_pages(struct page
*page
)
316 set_pages_wb(page
, 4);
318 __free_pages(page
, 2);
319 atomic_dec(&agp_bridge
->current_memory_agp
);
322 static int intel_i830_type_to_mask_type(struct agp_bridge_data
*bridge
,
325 if (type
< AGP_USER_TYPES
)
327 else if (type
== AGP_USER_CACHED_MEMORY
)
328 return INTEL_AGP_CACHED_MEMORY
;
333 static int intel_gen6_type_to_mask_type(struct agp_bridge_data
*bridge
,
336 unsigned int type_mask
= type
& ~AGP_USER_CACHED_MEMORY_GFDT
;
337 unsigned int gfdt
= type
& AGP_USER_CACHED_MEMORY_GFDT
;
339 if (type_mask
== AGP_USER_UNCACHED_MEMORY
)
340 return INTEL_AGP_UNCACHED_MEMORY
;
341 else if (type_mask
== AGP_USER_CACHED_MEMORY_LLC_MLC
)
342 return gfdt
? INTEL_AGP_CACHED_MEMORY_LLC_MLC_GFDT
:
343 INTEL_AGP_CACHED_MEMORY_LLC_MLC
;
344 else /* set 'normal'/'cached' to LLC by default */
345 return gfdt
? INTEL_AGP_CACHED_MEMORY_LLC_GFDT
:
346 INTEL_AGP_CACHED_MEMORY_LLC
;
350 static int intel_i810_insert_entries(struct agp_memory
*mem
, off_t pg_start
,
353 int i
, j
, num_entries
;
358 if (mem
->page_count
== 0)
361 temp
= agp_bridge
->current_size
;
362 num_entries
= A_SIZE_FIX(temp
)->num_entries
;
364 if ((pg_start
+ mem
->page_count
) > num_entries
)
368 for (j
= pg_start
; j
< (pg_start
+ mem
->page_count
); j
++) {
369 if (!PGE_EMPTY(agp_bridge
, readl(agp_bridge
->gatt_table
+j
))) {
375 if (type
!= mem
->type
)
378 mask_type
= agp_bridge
->driver
->agp_type_to_mask_type(agp_bridge
, type
);
381 case AGP_DCACHE_MEMORY
:
382 if (!mem
->is_flushed
)
383 global_cache_flush();
384 for (i
= pg_start
; i
< (pg_start
+ mem
->page_count
); i
++) {
385 writel((i
*4096)|I810_PTE_LOCAL
|I810_PTE_VALID
,
386 intel_private
.registers
+I810_PTE_BASE
+(i
*4));
388 readl(intel_private
.registers
+I810_PTE_BASE
+((i
-1)*4));
390 case AGP_PHYS_MEMORY
:
391 case AGP_NORMAL_MEMORY
:
392 if (!mem
->is_flushed
)
393 global_cache_flush();
394 for (i
= 0, j
= pg_start
; i
< mem
->page_count
; i
++, j
++) {
395 writel(agp_bridge
->driver
->mask_memory(agp_bridge
,
396 page_to_phys(mem
->pages
[i
]), mask_type
),
397 intel_private
.registers
+I810_PTE_BASE
+(j
*4));
399 readl(intel_private
.registers
+I810_PTE_BASE
+((j
-1)*4));
408 mem
->is_flushed
= true;
412 static int intel_i810_remove_entries(struct agp_memory
*mem
, off_t pg_start
,
417 if (mem
->page_count
== 0)
420 for (i
= pg_start
; i
< (mem
->page_count
+ pg_start
); i
++) {
421 writel(agp_bridge
->scratch_page
, intel_private
.registers
+I810_PTE_BASE
+(i
*4));
423 readl(intel_private
.registers
+I810_PTE_BASE
+((i
-1)*4));
429 * The i810/i830 requires a physical address to program its mouse
430 * pointer into hardware.
431 * However the Xserver still writes to it through the agp aperture.
433 static struct agp_memory
*alloc_agpphysmem_i8xx(size_t pg_count
, int type
)
435 struct agp_memory
*new;
439 case 1: page
= agp_bridge
->driver
->agp_alloc_page(agp_bridge
);
442 /* kludge to get 4 physical pages for ARGB cursor */
443 page
= i8xx_alloc_pages();
452 new = agp_create_memory(pg_count
);
456 new->pages
[0] = page
;
458 /* kludge to get 4 physical pages for ARGB cursor */
459 new->pages
[1] = new->pages
[0] + 1;
460 new->pages
[2] = new->pages
[1] + 1;
461 new->pages
[3] = new->pages
[2] + 1;
463 new->page_count
= pg_count
;
464 new->num_scratch_pages
= pg_count
;
465 new->type
= AGP_PHYS_MEMORY
;
466 new->physical
= page_to_phys(new->pages
[0]);
470 static struct agp_memory
*intel_i810_alloc_by_type(size_t pg_count
, int type
)
472 struct agp_memory
*new;
474 if (type
== AGP_DCACHE_MEMORY
) {
475 if (pg_count
!= intel_private
.num_dcache_entries
)
478 new = agp_create_memory(1);
482 new->type
= AGP_DCACHE_MEMORY
;
483 new->page_count
= pg_count
;
484 new->num_scratch_pages
= 0;
485 agp_free_page_array(new);
488 if (type
== AGP_PHYS_MEMORY
)
489 return alloc_agpphysmem_i8xx(pg_count
, type
);
493 static void intel_i810_free_by_type(struct agp_memory
*curr
)
495 agp_free_key(curr
->key
);
496 if (curr
->type
== AGP_PHYS_MEMORY
) {
497 if (curr
->page_count
== 4)
498 i8xx_destroy_pages(curr
->pages
[0]);
500 agp_bridge
->driver
->agp_destroy_page(curr
->pages
[0],
501 AGP_PAGE_DESTROY_UNMAP
);
502 agp_bridge
->driver
->agp_destroy_page(curr
->pages
[0],
503 AGP_PAGE_DESTROY_FREE
);
505 agp_free_page_array(curr
);
510 static unsigned long intel_i810_mask_memory(struct agp_bridge_data
*bridge
,
511 dma_addr_t addr
, int type
)
513 /* Type checking must be done elsewhere */
514 return addr
| bridge
->driver
->masks
[type
].mask
;
517 static struct aper_size_info_fixed intel_i830_sizes
[] =
520 /* The 64M mode still requires a 128k gatt */
526 static void intel_i830_init_gtt_entries(void)
532 static const int ddt
[4] = { 0, 16, 32, 64 };
533 int size
; /* reserved space (in kb) at the top of stolen memory */
535 pci_read_config_word(agp_bridge
->dev
, I830_GMCH_CTRL
, &gmch_ctrl
);
537 if (IS_G33
|| IS_I965
) {
539 pgetbl_ctl
= readl(intel_private
.registers
+I810_PGETBL_CTL
);
541 /* The 965 has a field telling us the size of the GTT,
542 * which may be larger than what is necessary to map the
545 switch (pgetbl_ctl
& I965_PGETBL_SIZE_MASK
) {
546 case I965_PGETBL_SIZE_128KB
:
549 case I965_PGETBL_SIZE_256KB
:
552 case I965_PGETBL_SIZE_512KB
:
555 case I965_PGETBL_SIZE_1MB
:
558 case I965_PGETBL_SIZE_2MB
:
561 case I965_PGETBL_SIZE_1_5MB
:
565 dev_info(&intel_private
.pcidev
->dev
,
566 "unknown page table size, assuming 512KB\n");
569 size
+= 4; /* add in BIOS popup space */
570 } else if (IS_G4X
|| IS_PINEVIEW
) {
571 /* On 4 series hardware, GTT stolen is separate from graphics
572 * stolen, ignore it in stolen gtt entries counting. However,
573 * 4KB of the stolen memory doesn't get mapped to the GTT.
577 /* On previous hardware, the GTT size was just what was
578 * required to map the aperture.
580 size
= agp_bridge
->driver
->fetch_size() + 4;
583 if (agp_bridge
->dev
->device
== PCI_DEVICE_ID_INTEL_82830_HB
||
584 agp_bridge
->dev
->device
== PCI_DEVICE_ID_INTEL_82845G_HB
) {
585 switch (gmch_ctrl
& I830_GMCH_GMS_MASK
) {
586 case I830_GMCH_GMS_STOLEN_512
:
587 gtt_entries
= KB(512) - KB(size
);
589 case I830_GMCH_GMS_STOLEN_1024
:
590 gtt_entries
= MB(1) - KB(size
);
592 case I830_GMCH_GMS_STOLEN_8192
:
593 gtt_entries
= MB(8) - KB(size
);
595 case I830_GMCH_GMS_LOCAL
:
596 rdct
= readb(intel_private
.registers
+I830_RDRAM_CHANNEL_TYPE
);
597 gtt_entries
= (I830_RDRAM_ND(rdct
) + 1) *
598 MB(ddt
[I830_RDRAM_DDT(rdct
)]);
607 * SandyBridge has new memory control reg at 0x50.w
610 pci_read_config_word(intel_private
.pcidev
, SNB_GMCH_CTRL
, &snb_gmch_ctl
);
611 switch (snb_gmch_ctl
& SNB_GMCH_GMS_STOLEN_MASK
) {
612 case SNB_GMCH_GMS_STOLEN_32M
:
613 gtt_entries
= MB(32) - KB(size
);
615 case SNB_GMCH_GMS_STOLEN_64M
:
616 gtt_entries
= MB(64) - KB(size
);
618 case SNB_GMCH_GMS_STOLEN_96M
:
619 gtt_entries
= MB(96) - KB(size
);
621 case SNB_GMCH_GMS_STOLEN_128M
:
622 gtt_entries
= MB(128) - KB(size
);
624 case SNB_GMCH_GMS_STOLEN_160M
:
625 gtt_entries
= MB(160) - KB(size
);
627 case SNB_GMCH_GMS_STOLEN_192M
:
628 gtt_entries
= MB(192) - KB(size
);
630 case SNB_GMCH_GMS_STOLEN_224M
:
631 gtt_entries
= MB(224) - KB(size
);
633 case SNB_GMCH_GMS_STOLEN_256M
:
634 gtt_entries
= MB(256) - KB(size
);
636 case SNB_GMCH_GMS_STOLEN_288M
:
637 gtt_entries
= MB(288) - KB(size
);
639 case SNB_GMCH_GMS_STOLEN_320M
:
640 gtt_entries
= MB(320) - KB(size
);
642 case SNB_GMCH_GMS_STOLEN_352M
:
643 gtt_entries
= MB(352) - KB(size
);
645 case SNB_GMCH_GMS_STOLEN_384M
:
646 gtt_entries
= MB(384) - KB(size
);
648 case SNB_GMCH_GMS_STOLEN_416M
:
649 gtt_entries
= MB(416) - KB(size
);
651 case SNB_GMCH_GMS_STOLEN_448M
:
652 gtt_entries
= MB(448) - KB(size
);
654 case SNB_GMCH_GMS_STOLEN_480M
:
655 gtt_entries
= MB(480) - KB(size
);
657 case SNB_GMCH_GMS_STOLEN_512M
:
658 gtt_entries
= MB(512) - KB(size
);
662 switch (gmch_ctrl
& I855_GMCH_GMS_MASK
) {
663 case I855_GMCH_GMS_STOLEN_1M
:
664 gtt_entries
= MB(1) - KB(size
);
666 case I855_GMCH_GMS_STOLEN_4M
:
667 gtt_entries
= MB(4) - KB(size
);
669 case I855_GMCH_GMS_STOLEN_8M
:
670 gtt_entries
= MB(8) - KB(size
);
672 case I855_GMCH_GMS_STOLEN_16M
:
673 gtt_entries
= MB(16) - KB(size
);
675 case I855_GMCH_GMS_STOLEN_32M
:
676 gtt_entries
= MB(32) - KB(size
);
678 case I915_GMCH_GMS_STOLEN_48M
:
679 /* Check it's really I915G */
680 if (IS_I915
|| IS_I965
|| IS_G33
|| IS_G4X
)
681 gtt_entries
= MB(48) - KB(size
);
685 case I915_GMCH_GMS_STOLEN_64M
:
686 /* Check it's really I915G */
687 if (IS_I915
|| IS_I965
|| IS_G33
|| IS_G4X
)
688 gtt_entries
= MB(64) - KB(size
);
692 case G33_GMCH_GMS_STOLEN_128M
:
693 if (IS_G33
|| IS_I965
|| IS_G4X
)
694 gtt_entries
= MB(128) - KB(size
);
698 case G33_GMCH_GMS_STOLEN_256M
:
699 if (IS_G33
|| IS_I965
|| IS_G4X
)
700 gtt_entries
= MB(256) - KB(size
);
704 case INTEL_GMCH_GMS_STOLEN_96M
:
705 if (IS_I965
|| IS_G4X
)
706 gtt_entries
= MB(96) - KB(size
);
710 case INTEL_GMCH_GMS_STOLEN_160M
:
711 if (IS_I965
|| IS_G4X
)
712 gtt_entries
= MB(160) - KB(size
);
716 case INTEL_GMCH_GMS_STOLEN_224M
:
717 if (IS_I965
|| IS_G4X
)
718 gtt_entries
= MB(224) - KB(size
);
722 case INTEL_GMCH_GMS_STOLEN_352M
:
723 if (IS_I965
|| IS_G4X
)
724 gtt_entries
= MB(352) - KB(size
);
733 if (!local
&& gtt_entries
> intel_max_stolen
) {
734 dev_info(&agp_bridge
->dev
->dev
,
735 "detected %dK stolen memory, trimming to %dK\n",
736 gtt_entries
/ KB(1), intel_max_stolen
/ KB(1));
737 gtt_entries
= intel_max_stolen
/ KB(4);
738 } else if (gtt_entries
> 0) {
739 dev_info(&agp_bridge
->dev
->dev
, "detected %dK %s memory\n",
740 gtt_entries
/ KB(1), local
? "local" : "stolen");
741 gtt_entries
/= KB(4);
743 dev_info(&agp_bridge
->dev
->dev
,
744 "no pre-allocated video memory detected\n");
748 intel_private
.gtt_entries
= gtt_entries
;
751 static void intel_i830_fini_flush(void)
753 kunmap(intel_private
.i8xx_page
);
754 intel_private
.i8xx_flush_page
= NULL
;
755 unmap_page_from_agp(intel_private
.i8xx_page
);
757 __free_page(intel_private
.i8xx_page
);
758 intel_private
.i8xx_page
= NULL
;
761 static void intel_i830_setup_flush(void)
763 /* return if we've already set the flush mechanism up */
764 if (intel_private
.i8xx_page
)
767 intel_private
.i8xx_page
= alloc_page(GFP_KERNEL
| __GFP_ZERO
| GFP_DMA32
);
768 if (!intel_private
.i8xx_page
)
771 intel_private
.i8xx_flush_page
= kmap(intel_private
.i8xx_page
);
772 if (!intel_private
.i8xx_flush_page
)
773 intel_i830_fini_flush();
776 /* The chipset_flush interface needs to get data that has already been
777 * flushed out of the CPU all the way out to main memory, because the GPU
778 * doesn't snoop those buffers.
780 * The 8xx series doesn't have the same lovely interface for flushing the
781 * chipset write buffers that the later chips do. According to the 865
782 * specs, it's 64 octwords, or 1KB. So, to get those previous things in
783 * that buffer out, we just fill 1KB and clflush it out, on the assumption
784 * that it'll push whatever was in there out. It appears to work.
786 static void intel_i830_chipset_flush(struct agp_bridge_data
*bridge
)
788 unsigned int *pg
= intel_private
.i8xx_flush_page
;
793 clflush_cache_range(pg
, 1024);
794 else if (wbinvd_on_all_cpus() != 0)
795 printk(KERN_ERR
"Timed out waiting for cache flush.\n");
798 /* The intel i830 automatically initializes the agp aperture during POST.
799 * Use the memory already set aside for in the GTT.
801 static int intel_i830_create_gatt_table(struct agp_bridge_data
*bridge
)
804 struct aper_size_info_fixed
*size
;
808 size
= agp_bridge
->current_size
;
809 page_order
= size
->page_order
;
810 num_entries
= size
->num_entries
;
811 agp_bridge
->gatt_table_real
= NULL
;
813 pci_read_config_dword(intel_private
.pcidev
, I810_MMADDR
, &temp
);
816 intel_private
.registers
= ioremap(temp
, 128 * 4096);
817 if (!intel_private
.registers
)
820 temp
= readl(intel_private
.registers
+I810_PGETBL_CTL
) & 0xfffff000;
821 global_cache_flush();
823 /* we have to call this as early as possible after the MMIO base address is known */
824 intel_i830_init_gtt_entries();
825 if (intel_private
.gtt_entries
== 0) {
826 iounmap(intel_private
.registers
);
830 agp_bridge
->gatt_table
= NULL
;
832 agp_bridge
->gatt_bus_addr
= temp
;
837 /* Return the gatt table to a sane state. Use the top of stolen
838 * memory for the GTT.
840 static int intel_i830_free_gatt_table(struct agp_bridge_data
*bridge
)
845 static int intel_i830_fetch_size(void)
848 struct aper_size_info_fixed
*values
;
850 values
= A_SIZE_FIX(agp_bridge
->driver
->aperture_sizes
);
852 if (agp_bridge
->dev
->device
!= PCI_DEVICE_ID_INTEL_82830_HB
&&
853 agp_bridge
->dev
->device
!= PCI_DEVICE_ID_INTEL_82845G_HB
) {
854 /* 855GM/852GM/865G has 128MB aperture size */
855 agp_bridge
->current_size
= (void *) values
;
856 agp_bridge
->aperture_size_idx
= 0;
857 return values
[0].size
;
860 pci_read_config_word(agp_bridge
->dev
, I830_GMCH_CTRL
, &gmch_ctrl
);
862 if ((gmch_ctrl
& I830_GMCH_MEM_MASK
) == I830_GMCH_MEM_128M
) {
863 agp_bridge
->current_size
= (void *) values
;
864 agp_bridge
->aperture_size_idx
= 0;
865 return values
[0].size
;
867 agp_bridge
->current_size
= (void *) (values
+ 1);
868 agp_bridge
->aperture_size_idx
= 1;
869 return values
[1].size
;
875 static int intel_i830_configure(void)
877 struct aper_size_info_fixed
*current_size
;
882 current_size
= A_SIZE_FIX(agp_bridge
->current_size
);
884 pci_read_config_dword(intel_private
.pcidev
, I810_GMADDR
, &temp
);
885 agp_bridge
->gart_bus_addr
= (temp
& PCI_BASE_ADDRESS_MEM_MASK
);
887 pci_read_config_word(agp_bridge
->dev
, I830_GMCH_CTRL
, &gmch_ctrl
);
888 gmch_ctrl
|= I830_GMCH_ENABLED
;
889 pci_write_config_word(agp_bridge
->dev
, I830_GMCH_CTRL
, gmch_ctrl
);
891 writel(agp_bridge
->gatt_bus_addr
|I810_PGETBL_ENABLED
, intel_private
.registers
+I810_PGETBL_CTL
);
892 readl(intel_private
.registers
+I810_PGETBL_CTL
); /* PCI Posting. */
894 if (agp_bridge
->driver
->needs_scratch_page
) {
895 for (i
= intel_private
.gtt_entries
; i
< current_size
->num_entries
; i
++) {
896 writel(agp_bridge
->scratch_page
, intel_private
.registers
+I810_PTE_BASE
+(i
*4));
898 readl(intel_private
.registers
+I810_PTE_BASE
+((i
-1)*4)); /* PCI Posting. */
901 global_cache_flush();
903 intel_i830_setup_flush();
907 static void intel_i830_cleanup(void)
909 iounmap(intel_private
.registers
);
912 static int intel_i830_insert_entries(struct agp_memory
*mem
, off_t pg_start
,
915 int i
, j
, num_entries
;
920 if (mem
->page_count
== 0)
923 temp
= agp_bridge
->current_size
;
924 num_entries
= A_SIZE_FIX(temp
)->num_entries
;
926 if (pg_start
< intel_private
.gtt_entries
) {
927 dev_printk(KERN_DEBUG
, &intel_private
.pcidev
->dev
,
928 "pg_start == 0x%.8lx, intel_private.gtt_entries == 0x%.8x\n",
929 pg_start
, intel_private
.gtt_entries
);
931 dev_info(&intel_private
.pcidev
->dev
,
932 "trying to insert into local/stolen memory\n");
936 if ((pg_start
+ mem
->page_count
) > num_entries
)
939 /* The i830 can't check the GTT for entries since its read only,
940 * depend on the caller to make the correct offset decisions.
943 if (type
!= mem
->type
)
946 mask_type
= agp_bridge
->driver
->agp_type_to_mask_type(agp_bridge
, type
);
948 if (mask_type
!= 0 && mask_type
!= AGP_PHYS_MEMORY
&&
949 mask_type
!= INTEL_AGP_CACHED_MEMORY
)
952 if (!mem
->is_flushed
)
953 global_cache_flush();
955 for (i
= 0, j
= pg_start
; i
< mem
->page_count
; i
++, j
++) {
956 writel(agp_bridge
->driver
->mask_memory(agp_bridge
,
957 page_to_phys(mem
->pages
[i
]), mask_type
),
958 intel_private
.registers
+I810_PTE_BASE
+(j
*4));
960 readl(intel_private
.registers
+I810_PTE_BASE
+((j
-1)*4));
965 mem
->is_flushed
= true;
969 static int intel_i830_remove_entries(struct agp_memory
*mem
, off_t pg_start
,
974 if (mem
->page_count
== 0)
977 if (pg_start
< intel_private
.gtt_entries
) {
978 dev_info(&intel_private
.pcidev
->dev
,
979 "trying to disable local/stolen memory\n");
983 for (i
= pg_start
; i
< (mem
->page_count
+ pg_start
); i
++) {
984 writel(agp_bridge
->scratch_page
, intel_private
.registers
+I810_PTE_BASE
+(i
*4));
986 readl(intel_private
.registers
+I810_PTE_BASE
+((i
-1)*4));
991 static struct agp_memory
*intel_i830_alloc_by_type(size_t pg_count
, int type
)
993 if (type
== AGP_PHYS_MEMORY
)
994 return alloc_agpphysmem_i8xx(pg_count
, type
);
995 /* always return NULL for other allocation types for now */
999 static int intel_alloc_chipset_flush_resource(void)
1002 ret
= pci_bus_alloc_resource(agp_bridge
->dev
->bus
, &intel_private
.ifp_resource
, PAGE_SIZE
,
1003 PAGE_SIZE
, PCIBIOS_MIN_MEM
, 0,
1004 pcibios_align_resource
, agp_bridge
->dev
);
1009 static void intel_i915_setup_chipset_flush(void)
1014 pci_read_config_dword(agp_bridge
->dev
, I915_IFPADDR
, &temp
);
1015 if (!(temp
& 0x1)) {
1016 intel_alloc_chipset_flush_resource();
1017 intel_private
.resource_valid
= 1;
1018 pci_write_config_dword(agp_bridge
->dev
, I915_IFPADDR
, (intel_private
.ifp_resource
.start
& 0xffffffff) | 0x1);
1022 intel_private
.resource_valid
= 1;
1023 intel_private
.ifp_resource
.start
= temp
;
1024 intel_private
.ifp_resource
.end
= temp
+ PAGE_SIZE
;
1025 ret
= request_resource(&iomem_resource
, &intel_private
.ifp_resource
);
1026 /* some BIOSes reserve this area in a pnp some don't */
1028 intel_private
.resource_valid
= 0;
1032 static void intel_i965_g33_setup_chipset_flush(void)
1034 u32 temp_hi
, temp_lo
;
1037 pci_read_config_dword(agp_bridge
->dev
, I965_IFPADDR
+ 4, &temp_hi
);
1038 pci_read_config_dword(agp_bridge
->dev
, I965_IFPADDR
, &temp_lo
);
1040 if (!(temp_lo
& 0x1)) {
1042 intel_alloc_chipset_flush_resource();
1044 intel_private
.resource_valid
= 1;
1045 pci_write_config_dword(agp_bridge
->dev
, I965_IFPADDR
+ 4,
1046 upper_32_bits(intel_private
.ifp_resource
.start
));
1047 pci_write_config_dword(agp_bridge
->dev
, I965_IFPADDR
, (intel_private
.ifp_resource
.start
& 0xffffffff) | 0x1);
1052 l64
= ((u64
)temp_hi
<< 32) | temp_lo
;
1054 intel_private
.resource_valid
= 1;
1055 intel_private
.ifp_resource
.start
= l64
;
1056 intel_private
.ifp_resource
.end
= l64
+ PAGE_SIZE
;
1057 ret
= request_resource(&iomem_resource
, &intel_private
.ifp_resource
);
1058 /* some BIOSes reserve this area in a pnp some don't */
1060 intel_private
.resource_valid
= 0;
1064 static void intel_i9xx_setup_flush(void)
1066 /* return if already configured */
1067 if (intel_private
.ifp_resource
.start
)
1073 /* setup a resource for this object */
1074 intel_private
.ifp_resource
.name
= "Intel Flush Page";
1075 intel_private
.ifp_resource
.flags
= IORESOURCE_MEM
;
1077 /* Setup chipset flush for 915 */
1078 if (IS_I965
|| IS_G33
|| IS_G4X
) {
1079 intel_i965_g33_setup_chipset_flush();
1081 intel_i915_setup_chipset_flush();
1084 if (intel_private
.ifp_resource
.start
)
1085 intel_private
.i9xx_flush_page
= ioremap_nocache(intel_private
.ifp_resource
.start
, PAGE_SIZE
);
1086 if (!intel_private
.i9xx_flush_page
)
1087 dev_err(&intel_private
.pcidev
->dev
,
1088 "can't ioremap flush page - no chipset flushing\n");
1091 static int intel_i9xx_configure(void)
1093 struct aper_size_info_fixed
*current_size
;
1098 current_size
= A_SIZE_FIX(agp_bridge
->current_size
);
1100 pci_read_config_dword(intel_private
.pcidev
, I915_GMADDR
, &temp
);
1102 agp_bridge
->gart_bus_addr
= (temp
& PCI_BASE_ADDRESS_MEM_MASK
);
1104 pci_read_config_word(agp_bridge
->dev
, I830_GMCH_CTRL
, &gmch_ctrl
);
1105 gmch_ctrl
|= I830_GMCH_ENABLED
;
1106 pci_write_config_word(agp_bridge
->dev
, I830_GMCH_CTRL
, gmch_ctrl
);
1108 writel(agp_bridge
->gatt_bus_addr
|I810_PGETBL_ENABLED
, intel_private
.registers
+I810_PGETBL_CTL
);
1109 readl(intel_private
.registers
+I810_PGETBL_CTL
); /* PCI Posting. */
1111 if (agp_bridge
->driver
->needs_scratch_page
) {
1112 for (i
= intel_private
.gtt_entries
; i
< intel_private
.gtt_total_size
; i
++) {
1113 writel(agp_bridge
->scratch_page
, intel_private
.gtt
+i
);
1115 readl(intel_private
.gtt
+i
-1); /* PCI Posting. */
1118 global_cache_flush();
1120 intel_i9xx_setup_flush();
1125 static void intel_i915_cleanup(void)
1127 if (intel_private
.i9xx_flush_page
)
1128 iounmap(intel_private
.i9xx_flush_page
);
1129 if (intel_private
.resource_valid
)
1130 release_resource(&intel_private
.ifp_resource
);
1131 intel_private
.ifp_resource
.start
= 0;
1132 intel_private
.resource_valid
= 0;
1133 iounmap(intel_private
.gtt
);
1134 iounmap(intel_private
.registers
);
1137 static void intel_i915_chipset_flush(struct agp_bridge_data
*bridge
)
1139 if (intel_private
.i9xx_flush_page
)
1140 writel(1, intel_private
.i9xx_flush_page
);
1143 static int intel_i915_insert_entries(struct agp_memory
*mem
, off_t pg_start
,
1151 if (mem
->page_count
== 0)
1154 temp
= agp_bridge
->current_size
;
1155 num_entries
= A_SIZE_FIX(temp
)->num_entries
;
1157 if (pg_start
< intel_private
.gtt_entries
) {
1158 dev_printk(KERN_DEBUG
, &intel_private
.pcidev
->dev
,
1159 "pg_start == 0x%.8lx, intel_private.gtt_entries == 0x%.8x\n",
1160 pg_start
, intel_private
.gtt_entries
);
1162 dev_info(&intel_private
.pcidev
->dev
,
1163 "trying to insert into local/stolen memory\n");
1167 if ((pg_start
+ mem
->page_count
) > num_entries
)
1170 /* The i915 can't check the GTT for entries since it's read only;
1171 * depend on the caller to make the correct offset decisions.
1174 if (type
!= mem
->type
)
1177 mask_type
= agp_bridge
->driver
->agp_type_to_mask_type(agp_bridge
, type
);
1179 if (!IS_SNB
&& mask_type
!= 0 && mask_type
!= AGP_PHYS_MEMORY
&&
1180 mask_type
!= INTEL_AGP_CACHED_MEMORY
)
1183 if (!mem
->is_flushed
)
1184 global_cache_flush();
1186 intel_agp_insert_sg_entries(mem
, pg_start
, mask_type
);
1191 mem
->is_flushed
= true;
1195 static int intel_i915_remove_entries(struct agp_memory
*mem
, off_t pg_start
,
1200 if (mem
->page_count
== 0)
1203 if (pg_start
< intel_private
.gtt_entries
) {
1204 dev_info(&intel_private
.pcidev
->dev
,
1205 "trying to disable local/stolen memory\n");
1209 for (i
= pg_start
; i
< (mem
->page_count
+ pg_start
); i
++)
1210 writel(agp_bridge
->scratch_page
, intel_private
.gtt
+i
);
1212 readl(intel_private
.gtt
+i
-1);
1217 /* Return the aperture size by just checking the resource length. The effect
1218 * described in the spec of the MSAC registers is just changing of the
1221 static int intel_i9xx_fetch_size(void)
1223 int num_sizes
= ARRAY_SIZE(intel_i830_sizes
);
1224 int aper_size
; /* size in megabytes */
1227 aper_size
= pci_resource_len(intel_private
.pcidev
, 2) / MB(1);
1229 for (i
= 0; i
< num_sizes
; i
++) {
1230 if (aper_size
== intel_i830_sizes
[i
].size
) {
1231 agp_bridge
->current_size
= intel_i830_sizes
+ i
;
1239 static int intel_i915_get_gtt_size(void)
1245 pgetbl_ctl
= readl(intel_private
.registers
+I810_PGETBL_CTL
);
1247 switch (pgetbl_ctl
& I965_PGETBL_SIZE_MASK
) {
1248 case I965_PGETBL_SIZE_128KB
:
1251 case I965_PGETBL_SIZE_256KB
:
1254 case I965_PGETBL_SIZE_512KB
:
1257 case I965_PGETBL_SIZE_1MB
:
1260 case I965_PGETBL_SIZE_2MB
:
1263 case I965_PGETBL_SIZE_1_5MB
:
1267 dev_info(&intel_private
.pcidev
->dev
,
1268 "unknown page table size, assuming 512KB\n");
1272 /* On previous hardware, the GTT size was just what was
1273 * required to map the aperture.
1275 size
= agp_bridge
->driver
->fetch_size();
1281 /* The intel i915 automatically initializes the agp aperture during POST.
1282 * Use the memory already set aside for in the GTT.
1284 static int intel_i915_create_gatt_table(struct agp_bridge_data
*bridge
)
1287 struct aper_size_info_fixed
*size
;
1292 size
= agp_bridge
->current_size
;
1293 page_order
= size
->page_order
;
1294 num_entries
= size
->num_entries
;
1295 agp_bridge
->gatt_table_real
= NULL
;
1297 pci_read_config_dword(intel_private
.pcidev
, I915_MMADDR
, &temp
);
1298 pci_read_config_dword(intel_private
.pcidev
, I915_PTEADDR
, &temp2
);
1302 intel_private
.registers
= ioremap(temp
, 128 * 4096);
1303 if (!intel_private
.registers
) {
1304 iounmap(intel_private
.gtt
);
1308 gtt_map_size
= intel_i915_get_gtt_size();
1310 intel_private
.gtt
= ioremap(temp2
, gtt_map_size
);
1311 if (!intel_private
.gtt
)
1314 intel_private
.gtt_total_size
= gtt_map_size
/ 4;
1316 temp
= readl(intel_private
.registers
+I810_PGETBL_CTL
) & 0xfffff000;
1317 global_cache_flush();
1319 /* we have to call this as early as possible after the MMIO base address is known */
1320 intel_i830_init_gtt_entries();
1321 if (intel_private
.gtt_entries
== 0) {
1322 iounmap(intel_private
.gtt
);
1323 iounmap(intel_private
.registers
);
1327 agp_bridge
->gatt_table
= NULL
;
1329 agp_bridge
->gatt_bus_addr
= temp
;
1335 * The i965 supports 36-bit physical addresses, but to keep
1336 * the format of the GTT the same, the bits that don't fit
1337 * in a 32-bit word are shifted down to bits 4..7.
1339 * Gcc is smart enough to notice that "(addr >> 28) & 0xf0"
1340 * is always zero on 32-bit architectures, so no need to make
1343 static unsigned long intel_i965_mask_memory(struct agp_bridge_data
*bridge
,
1344 dma_addr_t addr
, int type
)
1346 /* Shift high bits down */
1347 addr
|= (addr
>> 28) & 0xf0;
1349 /* Type checking must be done elsewhere */
1350 return addr
| bridge
->driver
->masks
[type
].mask
;
1353 static unsigned long intel_gen6_mask_memory(struct agp_bridge_data
*bridge
,
1354 dma_addr_t addr
, int type
)
1356 /* gen6 has bit11-4 for physical addr bit39-32 */
1357 addr
|= (addr
>> 28) & 0xff0;
1359 /* Type checking must be done elsewhere */
1360 return addr
| bridge
->driver
->masks
[type
].mask
;
1363 static void intel_i965_get_gtt_range(int *gtt_offset
, int *gtt_size
)
1367 switch (agp_bridge
->dev
->device
) {
1368 case PCI_DEVICE_ID_INTEL_GM45_HB
:
1369 case PCI_DEVICE_ID_INTEL_EAGLELAKE_HB
:
1370 case PCI_DEVICE_ID_INTEL_Q45_HB
:
1371 case PCI_DEVICE_ID_INTEL_G45_HB
:
1372 case PCI_DEVICE_ID_INTEL_G41_HB
:
1373 case PCI_DEVICE_ID_INTEL_B43_HB
:
1374 case PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB
:
1375 case PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB
:
1376 case PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB
:
1377 case PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB
:
1378 *gtt_offset
= *gtt_size
= MB(2);
1380 case PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB
:
1381 case PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB
:
1382 case PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_HB
:
1383 *gtt_offset
= MB(2);
1385 pci_read_config_word(intel_private
.pcidev
, SNB_GMCH_CTRL
, &snb_gmch_ctl
);
1386 switch (snb_gmch_ctl
& SNB_GTT_SIZE_MASK
) {
1388 case SNB_GTT_SIZE_0M
:
1389 printk(KERN_ERR
"Bad GTT size mask: 0x%04x.\n", snb_gmch_ctl
);
1392 case SNB_GTT_SIZE_1M
:
1395 case SNB_GTT_SIZE_2M
:
1401 *gtt_offset
= *gtt_size
= KB(512);
1405 /* The intel i965 automatically initializes the agp aperture during POST.
1406 * Use the memory already set aside for in the GTT.
1408 static int intel_i965_create_gatt_table(struct agp_bridge_data
*bridge
)
1411 struct aper_size_info_fixed
*size
;
1414 int gtt_offset
, gtt_size
;
1416 size
= agp_bridge
->current_size
;
1417 page_order
= size
->page_order
;
1418 num_entries
= size
->num_entries
;
1419 agp_bridge
->gatt_table_real
= NULL
;
1421 pci_read_config_dword(intel_private
.pcidev
, I915_MMADDR
, &temp
);
1425 intel_i965_get_gtt_range(>t_offset
, >t_size
);
1427 intel_private
.gtt
= ioremap((temp
+ gtt_offset
) , gtt_size
);
1429 if (!intel_private
.gtt
)
1432 intel_private
.gtt_total_size
= gtt_size
/ 4;
1434 intel_private
.registers
= ioremap(temp
, 128 * 4096);
1435 if (!intel_private
.registers
) {
1436 iounmap(intel_private
.gtt
);
1440 temp
= readl(intel_private
.registers
+I810_PGETBL_CTL
) & 0xfffff000;
1441 global_cache_flush();
1443 /* we have to call this as early as possible after the MMIO base address is known */
1444 intel_i830_init_gtt_entries();
1445 if (intel_private
.gtt_entries
== 0) {
1446 iounmap(intel_private
.gtt
);
1447 iounmap(intel_private
.registers
);
1451 agp_bridge
->gatt_table
= NULL
;
1453 agp_bridge
->gatt_bus_addr
= temp
;
/* Bridge driver vtable for i810-class chips.  These use the generic agp
 * gatt-table allocation (the table lives in system memory) with i810
 * insert/remove/alloc hooks on top. */
static const struct agp_bridge_driver intel_810_driver = {
	.owner			= THIS_MODULE,
	.aperture_sizes		= intel_i810_sizes,
	.size_type		= FIXED_APER_SIZE,
	.num_aperture_sizes	= 2,
	.needs_scratch_page	= true,
	.configure		= intel_i810_configure,
	.fetch_size		= intel_i810_fetch_size,
	.cleanup		= intel_i810_cleanup,
	.mask_memory		= intel_i810_mask_memory,
	.masks			= intel_i810_masks,
	.agp_enable		= intel_i810_agp_enable,
	.cache_flush		= global_cache_flush,
	/* i810 has no on-chip gatt: use the generic system-memory table. */
	.create_gatt_table	= agp_generic_create_gatt_table,
	.free_gatt_table	= agp_generic_free_gatt_table,
	.insert_memory		= intel_i810_insert_entries,
	.remove_memory		= intel_i810_remove_entries,
	.alloc_by_type		= intel_i810_alloc_by_type,
	.free_by_type		= intel_i810_free_by_type,
	.agp_alloc_page		= agp_generic_alloc_page,
	.agp_alloc_pages	= agp_generic_alloc_pages,
	.agp_destroy_page	= agp_generic_destroy_page,
	.agp_destroy_pages	= agp_generic_destroy_pages,
	.agp_type_to_mask_type	= agp_generic_type_to_mask_type,
};
/* Bridge driver vtable for i830-class chips: first generation with the
 * gatt initialized by the BIOS/POST, hence the i830-specific gatt hooks
 * and chipset flush. */
static const struct agp_bridge_driver intel_830_driver = {
	.owner			= THIS_MODULE,
	.aperture_sizes		= intel_i830_sizes,
	.size_type		= FIXED_APER_SIZE,
	.num_aperture_sizes	= 4,
	.needs_scratch_page	= true,
	.configure		= intel_i830_configure,
	.fetch_size		= intel_i830_fetch_size,
	.cleanup		= intel_i830_cleanup,
	.mask_memory		= intel_i810_mask_memory,
	.masks			= intel_i810_masks,
	.agp_enable		= intel_i810_agp_enable,
	.cache_flush		= global_cache_flush,
	.create_gatt_table	= intel_i830_create_gatt_table,
	.free_gatt_table	= intel_i830_free_gatt_table,
	.insert_memory		= intel_i830_insert_entries,
	.remove_memory		= intel_i830_remove_entries,
	.alloc_by_type		= intel_i830_alloc_by_type,
	.free_by_type		= intel_i810_free_by_type,
	.agp_alloc_page		= agp_generic_alloc_page,
	.agp_alloc_pages	= agp_generic_alloc_pages,
	.agp_destroy_page	= agp_generic_destroy_page,
	.agp_destroy_pages	= agp_generic_destroy_pages,
	.agp_type_to_mask_type	= intel_i830_type_to_mask_type,
	.chipset_flush		= intel_i830_chipset_flush,
};
/* Bridge driver vtable for i915-class chips.  Same masks as i810 but a
 * separately mapped GTT (see intel_i915_create_gatt_table) and, when the
 * IOMMU may be present, DMA-API based page mapping. */
static const struct agp_bridge_driver intel_915_driver = {
	.owner			= THIS_MODULE,
	.aperture_sizes		= intel_i830_sizes,
	.size_type		= FIXED_APER_SIZE,
	.num_aperture_sizes	= 4,
	.needs_scratch_page	= true,
	.configure		= intel_i9xx_configure,
	.fetch_size		= intel_i9xx_fetch_size,
	.cleanup		= intel_i915_cleanup,
	.mask_memory		= intel_i810_mask_memory,
	.masks			= intel_i810_masks,
	.agp_enable		= intel_i810_agp_enable,
	.cache_flush		= global_cache_flush,
	.create_gatt_table	= intel_i915_create_gatt_table,
	.free_gatt_table	= intel_i830_free_gatt_table,
	.insert_memory		= intel_i915_insert_entries,
	.remove_memory		= intel_i915_remove_entries,
	.alloc_by_type		= intel_i830_alloc_by_type,
	.free_by_type		= intel_i810_free_by_type,
	.agp_alloc_page		= agp_generic_alloc_page,
	.agp_alloc_pages	= agp_generic_alloc_pages,
	.agp_destroy_page	= agp_generic_destroy_page,
	.agp_destroy_pages	= agp_generic_destroy_pages,
	.agp_type_to_mask_type	= intel_i830_type_to_mask_type,
	.chipset_flush		= intel_i915_chipset_flush,
#ifdef USE_PCI_DMA_API
	/* Proper DMA mapping, needed when an Intel IOMMU is active. */
	.agp_map_page		= intel_agp_map_page,
	.agp_unmap_page		= intel_agp_unmap_page,
	.agp_map_memory		= intel_agp_map_memory,
	.agp_unmap_memory	= intel_agp_unmap_memory,
#endif
};
/* Bridge driver vtable for i965-class chips: i915 hooks plus the 36-bit
 * address folding of intel_i965_mask_memory and the i965 gatt setup. */
static const struct agp_bridge_driver intel_i965_driver = {
	.owner			= THIS_MODULE,
	.aperture_sizes		= intel_i830_sizes,
	.size_type		= FIXED_APER_SIZE,
	.num_aperture_sizes	= 4,
	.needs_scratch_page	= true,
	.configure		= intel_i9xx_configure,
	.fetch_size		= intel_i9xx_fetch_size,
	.cleanup		= intel_i915_cleanup,
	.mask_memory		= intel_i965_mask_memory,
	.masks			= intel_i810_masks,
	.agp_enable		= intel_i810_agp_enable,
	.cache_flush		= global_cache_flush,
	.create_gatt_table	= intel_i965_create_gatt_table,
	.free_gatt_table	= intel_i830_free_gatt_table,
	.insert_memory		= intel_i915_insert_entries,
	.remove_memory		= intel_i915_remove_entries,
	.alloc_by_type		= intel_i830_alloc_by_type,
	.free_by_type		= intel_i810_free_by_type,
	.agp_alloc_page		= agp_generic_alloc_page,
	.agp_alloc_pages	= agp_generic_alloc_pages,
	.agp_destroy_page	= agp_generic_destroy_page,
	.agp_destroy_pages	= agp_generic_destroy_pages,
	.agp_type_to_mask_type	= intel_i830_type_to_mask_type,
	.chipset_flush		= intel_i915_chipset_flush,
#ifdef USE_PCI_DMA_API
	/* Proper DMA mapping, needed when an Intel IOMMU is active. */
	.agp_map_page		= intel_agp_map_page,
	.agp_unmap_page		= intel_agp_unmap_page,
	.agp_map_memory		= intel_agp_map_memory,
	.agp_unmap_memory	= intel_agp_unmap_memory,
#endif
};
/* Bridge driver vtable for Sandybridge (gen6): gen6 PTE masks/cache types
 * (40-bit addresses, GFDT bit) on top of the i965 gatt setup. */
static const struct agp_bridge_driver intel_gen6_driver = {
	.owner			= THIS_MODULE,
	.aperture_sizes		= intel_i830_sizes,
	.size_type		= FIXED_APER_SIZE,
	.num_aperture_sizes	= 4,
	.needs_scratch_page	= true,
	.configure		= intel_i9xx_configure,
	.fetch_size		= intel_i9xx_fetch_size,
	.cleanup		= intel_i915_cleanup,
	.mask_memory		= intel_gen6_mask_memory,
	.masks			= intel_gen6_masks,
	.agp_enable		= intel_i810_agp_enable,
	.cache_flush		= global_cache_flush,
	.create_gatt_table	= intel_i965_create_gatt_table,
	.free_gatt_table	= intel_i830_free_gatt_table,
	.insert_memory		= intel_i915_insert_entries,
	.remove_memory		= intel_i915_remove_entries,
	.alloc_by_type		= intel_i830_alloc_by_type,
	.free_by_type		= intel_i810_free_by_type,
	.agp_alloc_page		= agp_generic_alloc_page,
	.agp_alloc_pages	= agp_generic_alloc_pages,
	.agp_destroy_page	= agp_generic_destroy_page,
	.agp_destroy_pages	= agp_generic_destroy_pages,
	.agp_type_to_mask_type	= intel_gen6_type_to_mask_type,
	.chipset_flush		= intel_i915_chipset_flush,
#ifdef USE_PCI_DMA_API
	/* Proper DMA mapping, needed when an Intel IOMMU is active. */
	.agp_map_page		= intel_agp_map_page,
	.agp_unmap_page		= intel_agp_unmap_page,
	.agp_map_memory		= intel_agp_map_memory,
	.agp_unmap_memory	= intel_agp_unmap_memory,
#endif
};
1610 static const struct agp_bridge_driver intel_g33_driver
= {
1611 .owner
= THIS_MODULE
,
1612 .aperture_sizes
= intel_i830_sizes
,
1613 .size_type
= FIXED_APER_SIZE
,
1614 .num_aperture_sizes
= 4,
1615 .needs_scratch_page
= true,
1616 .configure
= intel_i9xx_configure
,
1617 .fetch_size
= intel_i9xx_fetch_size
,
1618 .cleanup
= intel_i915_cleanup
,
1619 .mask_memory
= intel_i965_mask_memory
,
1620 .masks
= intel_i810_masks
,
1621 .agp_enable
= intel_i810_agp_enable
,
1622 .cache_flush
= global_cache_flush
,
1623 .create_gatt_table
= intel_i915_create_gatt_table
,
1624 .free_gatt_table
= intel_i830_free_gatt_table
,
1625 .insert_memory
= intel_i915_insert_entries
,
1626 .remove_memory
= intel_i915_remove_entries
,
1627 .alloc_by_type
= intel_i830_alloc_by_type
,
1628 .free_by_type
= intel_i810_free_by_type
,
1629 .agp_alloc_page
= agp_generic_alloc_page
,
1630 .agp_alloc_pages
= agp_generic_alloc_pages
,
1631 .agp_destroy_page
= agp_generic_destroy_page
,
1632 .agp_destroy_pages
= agp_generic_destroy_pages
,
1633 .agp_type_to_mask_type
= intel_i830_type_to_mask_type
,
1634 .chipset_flush
= intel_i915_chipset_flush
,
1635 #ifdef USE_PCI_DMA_API
1636 .agp_map_page
= intel_agp_map_page
,
1637 .agp_unmap_page
= intel_agp_unmap_page
,
1638 .agp_map_memory
= intel_agp_map_memory
,
1639 .agp_unmap_memory
= intel_agp_unmap_memory
,