/*
 * Intel GTT (Graphics Translation Table) routines
 *
 * Caveat: This driver implements the linux agp interface, but this is far from
 * a agp driver! GTT support ended up here for purely historical reasons: The
 * old userspace intel graphics drivers needed an interface to map memory into
 * the GTT. And the drm provides a default interface for graphic devices sitting
 * on an agp port. So it made sense to fake the GTT support as an agp port to
 * avoid having to create a new api.
 *
 * With gem this does not make much sense anymore, just needlessly complicates
 * the code. But as long as the old graphics stack is still supported, it's stuck
 * here.
 *
 * /fairy-tale-mode off
 */

/*
 * If we have Intel graphics, we're not going to have anything other than
 * an Intel IOMMU. So make the correct use of the PCI DMA API contingent
 * on the Intel IOMMU support (CONFIG_DMAR).
 * Only newer chipsets need to bother with this, of course.
 */
25 #define USE_PCI_DMA_API 1
28 static const struct aper_size_info_fixed intel_i810_sizes
[] =
31 /* The 32M mode still requires a 64k gatt */
35 #define AGP_DCACHE_MEMORY 1
36 #define AGP_PHYS_MEMORY 2
37 #define INTEL_AGP_CACHED_MEMORY 3
39 static struct gatt_mask intel_i810_masks
[] =
41 {.mask
= I810_PTE_VALID
, .type
= 0},
42 {.mask
= (I810_PTE_VALID
| I810_PTE_LOCAL
), .type
= AGP_DCACHE_MEMORY
},
43 {.mask
= I810_PTE_VALID
, .type
= 0},
44 {.mask
= I810_PTE_VALID
| I830_PTE_SYSTEM_CACHED
,
45 .type
= INTEL_AGP_CACHED_MEMORY
}
48 static struct _intel_private
{
49 struct pci_dev
*pcidev
; /* device one */
50 u8 __iomem
*registers
;
51 u32 __iomem
*gtt
; /* I915G */
52 int num_dcache_entries
;
53 /* gtt_entries is the number of gtt entries that are already mapped
54 * to stolen memory. Stolen memory is larger than the memory mapped
55 * through gtt_entries, as it includes some reserved space for the BIOS
56 * popup and for the GTT.
58 int gtt_entries
; /* i830+ */
61 void __iomem
*i9xx_flush_page
;
62 void *i8xx_flush_page
;
64 struct page
*i8xx_page
;
65 struct resource ifp_resource
;
69 #ifdef USE_PCI_DMA_API
70 static int intel_agp_map_page(struct page
*page
, dma_addr_t
*ret
)
72 *ret
= pci_map_page(intel_private
.pcidev
, page
, 0,
73 PAGE_SIZE
, PCI_DMA_BIDIRECTIONAL
);
74 if (pci_dma_mapping_error(intel_private
.pcidev
, *ret
))
79 static void intel_agp_unmap_page(struct page
*page
, dma_addr_t dma
)
81 pci_unmap_page(intel_private
.pcidev
, dma
,
82 PAGE_SIZE
, PCI_DMA_BIDIRECTIONAL
);
85 static void intel_agp_free_sglist(struct agp_memory
*mem
)
89 st
.sgl
= mem
->sg_list
;
90 st
.orig_nents
= st
.nents
= mem
->page_count
;
98 static int intel_agp_map_memory(struct agp_memory
*mem
)
101 struct scatterlist
*sg
;
104 DBG("try mapping %lu pages\n", (unsigned long)mem
->page_count
);
106 if (sg_alloc_table(&st
, mem
->page_count
, GFP_KERNEL
))
109 mem
->sg_list
= sg
= st
.sgl
;
111 for (i
= 0 ; i
< mem
->page_count
; i
++, sg
= sg_next(sg
))
112 sg_set_page(sg
, mem
->pages
[i
], PAGE_SIZE
, 0);
114 mem
->num_sg
= pci_map_sg(intel_private
.pcidev
, mem
->sg_list
,
115 mem
->page_count
, PCI_DMA_BIDIRECTIONAL
);
116 if (unlikely(!mem
->num_sg
)) {
117 intel_agp_free_sglist(mem
);
123 static void intel_agp_unmap_memory(struct agp_memory
*mem
)
125 DBG("try unmapping %lu pages\n", (unsigned long)mem
->page_count
);
127 pci_unmap_sg(intel_private
.pcidev
, mem
->sg_list
,
128 mem
->page_count
, PCI_DMA_BIDIRECTIONAL
);
129 intel_agp_free_sglist(mem
);
132 static void intel_agp_insert_sg_entries(struct agp_memory
*mem
,
133 off_t pg_start
, int mask_type
)
135 struct scatterlist
*sg
;
140 WARN_ON(!mem
->num_sg
);
142 if (mem
->num_sg
== mem
->page_count
) {
143 for_each_sg(mem
->sg_list
, sg
, mem
->page_count
, i
) {
144 writel(agp_bridge
->driver
->mask_memory(agp_bridge
,
145 sg_dma_address(sg
), mask_type
),
146 intel_private
.gtt
+j
);
150 /* sg may merge pages, but we have to separate
151 * per-page addr for GTT */
154 for_each_sg(mem
->sg_list
, sg
, mem
->num_sg
, i
) {
155 len
= sg_dma_len(sg
) / PAGE_SIZE
;
156 for (m
= 0; m
< len
; m
++) {
157 writel(agp_bridge
->driver
->mask_memory(agp_bridge
,
158 sg_dma_address(sg
) + m
* PAGE_SIZE
,
160 intel_private
.gtt
+j
);
165 readl(intel_private
.gtt
+j
-1);
170 static void intel_agp_insert_sg_entries(struct agp_memory
*mem
,
171 off_t pg_start
, int mask_type
)
176 if (agp_bridge
->dev
->device
== PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB
||
177 agp_bridge
->dev
->device
== PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB
)
179 cache_bits
= I830_PTE_SYSTEM_CACHED
;
182 for (i
= 0, j
= pg_start
; i
< mem
->page_count
; i
++, j
++) {
183 writel(agp_bridge
->driver
->mask_memory(agp_bridge
,
184 page_to_phys(mem
->pages
[i
]), mask_type
),
185 intel_private
.gtt
+j
);
188 readl(intel_private
.gtt
+j
-1);
193 static int intel_i810_fetch_size(void)
196 struct aper_size_info_fixed
*values
;
198 pci_read_config_dword(agp_bridge
->dev
, I810_SMRAM_MISCC
, &smram_miscc
);
199 values
= A_SIZE_FIX(agp_bridge
->driver
->aperture_sizes
);
201 if ((smram_miscc
& I810_GMS
) == I810_GMS_DISABLE
) {
202 dev_warn(&agp_bridge
->dev
->dev
, "i810 is disabled\n");
205 if ((smram_miscc
& I810_GFX_MEM_WIN_SIZE
) == I810_GFX_MEM_WIN_32M
) {
206 agp_bridge
->current_size
= (void *) (values
+ 1);
207 agp_bridge
->aperture_size_idx
= 1;
208 return values
[1].size
;
210 agp_bridge
->current_size
= (void *) (values
);
211 agp_bridge
->aperture_size_idx
= 0;
212 return values
[0].size
;
218 static int intel_i810_configure(void)
220 struct aper_size_info_fixed
*current_size
;
224 current_size
= A_SIZE_FIX(agp_bridge
->current_size
);
226 if (!intel_private
.registers
) {
227 pci_read_config_dword(intel_private
.pcidev
, I810_MMADDR
, &temp
);
230 intel_private
.registers
= ioremap(temp
, 128 * 4096);
231 if (!intel_private
.registers
) {
232 dev_err(&intel_private
.pcidev
->dev
,
233 "can't remap memory\n");
238 if ((readl(intel_private
.registers
+I810_DRAM_CTL
)
239 & I810_DRAM_ROW_0
) == I810_DRAM_ROW_0_SDRAM
) {
240 /* This will need to be dynamically assigned */
241 dev_info(&intel_private
.pcidev
->dev
,
242 "detected 4MB dedicated video ram\n");
243 intel_private
.num_dcache_entries
= 1024;
245 pci_read_config_dword(intel_private
.pcidev
, I810_GMADDR
, &temp
);
246 agp_bridge
->gart_bus_addr
= (temp
& PCI_BASE_ADDRESS_MEM_MASK
);
247 writel(agp_bridge
->gatt_bus_addr
| I810_PGETBL_ENABLED
, intel_private
.registers
+I810_PGETBL_CTL
);
248 readl(intel_private
.registers
+I810_PGETBL_CTL
); /* PCI Posting. */
250 if (agp_bridge
->driver
->needs_scratch_page
) {
251 for (i
= 0; i
< current_size
->num_entries
; i
++) {
252 writel(agp_bridge
->scratch_page
, intel_private
.registers
+I810_PTE_BASE
+(i
*4));
254 readl(intel_private
.registers
+I810_PTE_BASE
+((i
-1)*4)); /* PCI posting. */
256 global_cache_flush();
260 static void intel_i810_cleanup(void)
262 writel(0, intel_private
.registers
+I810_PGETBL_CTL
);
263 readl(intel_private
.registers
); /* PCI Posting. */
264 iounmap(intel_private
.registers
);
267 static void intel_i810_agp_enable(struct agp_bridge_data
*bridge
, u32 mode
)
272 /* Exists to support ARGB cursors */
273 static struct page
*i8xx_alloc_pages(void)
277 page
= alloc_pages(GFP_KERNEL
| GFP_DMA32
, 2);
281 if (set_pages_uc(page
, 4) < 0) {
282 set_pages_wb(page
, 4);
283 __free_pages(page
, 2);
287 atomic_inc(&agp_bridge
->current_memory_agp
);
291 static void i8xx_destroy_pages(struct page
*page
)
296 set_pages_wb(page
, 4);
298 __free_pages(page
, 2);
299 atomic_dec(&agp_bridge
->current_memory_agp
);
302 static int intel_i830_type_to_mask_type(struct agp_bridge_data
*bridge
,
305 if (type
< AGP_USER_TYPES
)
307 else if (type
== AGP_USER_CACHED_MEMORY
)
308 return INTEL_AGP_CACHED_MEMORY
;
313 static int intel_i810_insert_entries(struct agp_memory
*mem
, off_t pg_start
,
316 int i
, j
, num_entries
;
321 if (mem
->page_count
== 0)
324 temp
= agp_bridge
->current_size
;
325 num_entries
= A_SIZE_FIX(temp
)->num_entries
;
327 if ((pg_start
+ mem
->page_count
) > num_entries
)
331 for (j
= pg_start
; j
< (pg_start
+ mem
->page_count
); j
++) {
332 if (!PGE_EMPTY(agp_bridge
, readl(agp_bridge
->gatt_table
+j
))) {
338 if (type
!= mem
->type
)
341 mask_type
= agp_bridge
->driver
->agp_type_to_mask_type(agp_bridge
, type
);
344 case AGP_DCACHE_MEMORY
:
345 if (!mem
->is_flushed
)
346 global_cache_flush();
347 for (i
= pg_start
; i
< (pg_start
+ mem
->page_count
); i
++) {
348 writel((i
*4096)|I810_PTE_LOCAL
|I810_PTE_VALID
,
349 intel_private
.registers
+I810_PTE_BASE
+(i
*4));
351 readl(intel_private
.registers
+I810_PTE_BASE
+((i
-1)*4));
353 case AGP_PHYS_MEMORY
:
354 case AGP_NORMAL_MEMORY
:
355 if (!mem
->is_flushed
)
356 global_cache_flush();
357 for (i
= 0, j
= pg_start
; i
< mem
->page_count
; i
++, j
++) {
358 writel(agp_bridge
->driver
->mask_memory(agp_bridge
,
359 page_to_phys(mem
->pages
[i
]), mask_type
),
360 intel_private
.registers
+I810_PTE_BASE
+(j
*4));
362 readl(intel_private
.registers
+I810_PTE_BASE
+((j
-1)*4));
371 mem
->is_flushed
= true;
375 static int intel_i810_remove_entries(struct agp_memory
*mem
, off_t pg_start
,
380 if (mem
->page_count
== 0)
383 for (i
= pg_start
; i
< (mem
->page_count
+ pg_start
); i
++) {
384 writel(agp_bridge
->scratch_page
, intel_private
.registers
+I810_PTE_BASE
+(i
*4));
386 readl(intel_private
.registers
+I810_PTE_BASE
+((i
-1)*4));
392 * The i810/i830 requires a physical address to program its mouse
393 * pointer into hardware.
394 * However the Xserver still writes to it through the agp aperture.
396 static struct agp_memory
*alloc_agpphysmem_i8xx(size_t pg_count
, int type
)
398 struct agp_memory
*new;
402 case 1: page
= agp_bridge
->driver
->agp_alloc_page(agp_bridge
);
405 /* kludge to get 4 physical pages for ARGB cursor */
406 page
= i8xx_alloc_pages();
415 new = agp_create_memory(pg_count
);
419 new->pages
[0] = page
;
421 /* kludge to get 4 physical pages for ARGB cursor */
422 new->pages
[1] = new->pages
[0] + 1;
423 new->pages
[2] = new->pages
[1] + 1;
424 new->pages
[3] = new->pages
[2] + 1;
426 new->page_count
= pg_count
;
427 new->num_scratch_pages
= pg_count
;
428 new->type
= AGP_PHYS_MEMORY
;
429 new->physical
= page_to_phys(new->pages
[0]);
433 static struct agp_memory
*intel_i810_alloc_by_type(size_t pg_count
, int type
)
435 struct agp_memory
*new;
437 if (type
== AGP_DCACHE_MEMORY
) {
438 if (pg_count
!= intel_private
.num_dcache_entries
)
441 new = agp_create_memory(1);
445 new->type
= AGP_DCACHE_MEMORY
;
446 new->page_count
= pg_count
;
447 new->num_scratch_pages
= 0;
448 agp_free_page_array(new);
451 if (type
== AGP_PHYS_MEMORY
)
452 return alloc_agpphysmem_i8xx(pg_count
, type
);
456 static void intel_i810_free_by_type(struct agp_memory
*curr
)
458 agp_free_key(curr
->key
);
459 if (curr
->type
== AGP_PHYS_MEMORY
) {
460 if (curr
->page_count
== 4)
461 i8xx_destroy_pages(curr
->pages
[0]);
463 agp_bridge
->driver
->agp_destroy_page(curr
->pages
[0],
464 AGP_PAGE_DESTROY_UNMAP
);
465 agp_bridge
->driver
->agp_destroy_page(curr
->pages
[0],
466 AGP_PAGE_DESTROY_FREE
);
468 agp_free_page_array(curr
);
473 static unsigned long intel_i810_mask_memory(struct agp_bridge_data
*bridge
,
474 dma_addr_t addr
, int type
)
476 /* Type checking must be done elsewhere */
477 return addr
| bridge
->driver
->masks
[type
].mask
;
480 static struct aper_size_info_fixed intel_i830_sizes
[] =
483 /* The 64M mode still requires a 128k gatt */
489 static void intel_i830_init_gtt_entries(void)
495 static const int ddt
[4] = { 0, 16, 32, 64 };
496 int size
; /* reserved space (in kb) at the top of stolen memory */
498 pci_read_config_word(agp_bridge
->dev
, I830_GMCH_CTRL
, &gmch_ctrl
);
502 pgetbl_ctl
= readl(intel_private
.registers
+I810_PGETBL_CTL
);
504 /* The 965 has a field telling us the size of the GTT,
505 * which may be larger than what is necessary to map the
508 switch (pgetbl_ctl
& I965_PGETBL_SIZE_MASK
) {
509 case I965_PGETBL_SIZE_128KB
:
512 case I965_PGETBL_SIZE_256KB
:
515 case I965_PGETBL_SIZE_512KB
:
518 case I965_PGETBL_SIZE_1MB
:
521 case I965_PGETBL_SIZE_2MB
:
524 case I965_PGETBL_SIZE_1_5MB
:
528 dev_info(&intel_private
.pcidev
->dev
,
529 "unknown page table size, assuming 512KB\n");
532 size
+= 4; /* add in BIOS popup space */
533 } else if (IS_G33
&& !IS_PINEVIEW
) {
534 /* G33's GTT size defined in gmch_ctrl */
535 switch (gmch_ctrl
& G33_PGETBL_SIZE_MASK
) {
536 case G33_PGETBL_SIZE_1M
:
539 case G33_PGETBL_SIZE_2M
:
543 dev_info(&agp_bridge
->dev
->dev
,
544 "unknown page table size 0x%x, assuming 512KB\n",
545 (gmch_ctrl
& G33_PGETBL_SIZE_MASK
));
549 } else if (IS_G4X
|| IS_PINEVIEW
) {
550 /* On 4 series hardware, GTT stolen is separate from graphics
551 * stolen, ignore it in stolen gtt entries counting. However,
552 * 4KB of the stolen memory doesn't get mapped to the GTT.
556 /* On previous hardware, the GTT size was just what was
557 * required to map the aperture.
559 size
= agp_bridge
->driver
->fetch_size() + 4;
562 if (agp_bridge
->dev
->device
== PCI_DEVICE_ID_INTEL_82830_HB
||
563 agp_bridge
->dev
->device
== PCI_DEVICE_ID_INTEL_82845G_HB
) {
564 switch (gmch_ctrl
& I830_GMCH_GMS_MASK
) {
565 case I830_GMCH_GMS_STOLEN_512
:
566 gtt_entries
= KB(512) - KB(size
);
568 case I830_GMCH_GMS_STOLEN_1024
:
569 gtt_entries
= MB(1) - KB(size
);
571 case I830_GMCH_GMS_STOLEN_8192
:
572 gtt_entries
= MB(8) - KB(size
);
574 case I830_GMCH_GMS_LOCAL
:
575 rdct
= readb(intel_private
.registers
+I830_RDRAM_CHANNEL_TYPE
);
576 gtt_entries
= (I830_RDRAM_ND(rdct
) + 1) *
577 MB(ddt
[I830_RDRAM_DDT(rdct
)]);
584 } else if (agp_bridge
->dev
->device
== PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB
||
585 agp_bridge
->dev
->device
== PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB
) {
587 * SandyBridge has new memory control reg at 0x50.w
590 pci_read_config_word(intel_private
.pcidev
, SNB_GMCH_CTRL
, &snb_gmch_ctl
);
591 switch (snb_gmch_ctl
& SNB_GMCH_GMS_STOLEN_MASK
) {
592 case SNB_GMCH_GMS_STOLEN_32M
:
593 gtt_entries
= MB(32) - KB(size
);
595 case SNB_GMCH_GMS_STOLEN_64M
:
596 gtt_entries
= MB(64) - KB(size
);
598 case SNB_GMCH_GMS_STOLEN_96M
:
599 gtt_entries
= MB(96) - KB(size
);
601 case SNB_GMCH_GMS_STOLEN_128M
:
602 gtt_entries
= MB(128) - KB(size
);
604 case SNB_GMCH_GMS_STOLEN_160M
:
605 gtt_entries
= MB(160) - KB(size
);
607 case SNB_GMCH_GMS_STOLEN_192M
:
608 gtt_entries
= MB(192) - KB(size
);
610 case SNB_GMCH_GMS_STOLEN_224M
:
611 gtt_entries
= MB(224) - KB(size
);
613 case SNB_GMCH_GMS_STOLEN_256M
:
614 gtt_entries
= MB(256) - KB(size
);
616 case SNB_GMCH_GMS_STOLEN_288M
:
617 gtt_entries
= MB(288) - KB(size
);
619 case SNB_GMCH_GMS_STOLEN_320M
:
620 gtt_entries
= MB(320) - KB(size
);
622 case SNB_GMCH_GMS_STOLEN_352M
:
623 gtt_entries
= MB(352) - KB(size
);
625 case SNB_GMCH_GMS_STOLEN_384M
:
626 gtt_entries
= MB(384) - KB(size
);
628 case SNB_GMCH_GMS_STOLEN_416M
:
629 gtt_entries
= MB(416) - KB(size
);
631 case SNB_GMCH_GMS_STOLEN_448M
:
632 gtt_entries
= MB(448) - KB(size
);
634 case SNB_GMCH_GMS_STOLEN_480M
:
635 gtt_entries
= MB(480) - KB(size
);
637 case SNB_GMCH_GMS_STOLEN_512M
:
638 gtt_entries
= MB(512) - KB(size
);
642 switch (gmch_ctrl
& I855_GMCH_GMS_MASK
) {
643 case I855_GMCH_GMS_STOLEN_1M
:
644 gtt_entries
= MB(1) - KB(size
);
646 case I855_GMCH_GMS_STOLEN_4M
:
647 gtt_entries
= MB(4) - KB(size
);
649 case I855_GMCH_GMS_STOLEN_8M
:
650 gtt_entries
= MB(8) - KB(size
);
652 case I855_GMCH_GMS_STOLEN_16M
:
653 gtt_entries
= MB(16) - KB(size
);
655 case I855_GMCH_GMS_STOLEN_32M
:
656 gtt_entries
= MB(32) - KB(size
);
658 case I915_GMCH_GMS_STOLEN_48M
:
659 /* Check it's really I915G */
660 if (IS_I915
|| IS_I965
|| IS_G33
|| IS_G4X
)
661 gtt_entries
= MB(48) - KB(size
);
665 case I915_GMCH_GMS_STOLEN_64M
:
666 /* Check it's really I915G */
667 if (IS_I915
|| IS_I965
|| IS_G33
|| IS_G4X
)
668 gtt_entries
= MB(64) - KB(size
);
672 case G33_GMCH_GMS_STOLEN_128M
:
673 if (IS_G33
|| IS_I965
|| IS_G4X
)
674 gtt_entries
= MB(128) - KB(size
);
678 case G33_GMCH_GMS_STOLEN_256M
:
679 if (IS_G33
|| IS_I965
|| IS_G4X
)
680 gtt_entries
= MB(256) - KB(size
);
684 case INTEL_GMCH_GMS_STOLEN_96M
:
685 if (IS_I965
|| IS_G4X
)
686 gtt_entries
= MB(96) - KB(size
);
690 case INTEL_GMCH_GMS_STOLEN_160M
:
691 if (IS_I965
|| IS_G4X
)
692 gtt_entries
= MB(160) - KB(size
);
696 case INTEL_GMCH_GMS_STOLEN_224M
:
697 if (IS_I965
|| IS_G4X
)
698 gtt_entries
= MB(224) - KB(size
);
702 case INTEL_GMCH_GMS_STOLEN_352M
:
703 if (IS_I965
|| IS_G4X
)
704 gtt_entries
= MB(352) - KB(size
);
713 if (gtt_entries
> 0) {
714 dev_info(&agp_bridge
->dev
->dev
, "detected %dK %s memory\n",
715 gtt_entries
/ KB(1), local
? "local" : "stolen");
716 gtt_entries
/= KB(4);
718 dev_info(&agp_bridge
->dev
->dev
,
719 "no pre-allocated video memory detected\n");
723 intel_private
.gtt_entries
= gtt_entries
;
726 static void intel_i830_fini_flush(void)
728 kunmap(intel_private
.i8xx_page
);
729 intel_private
.i8xx_flush_page
= NULL
;
730 unmap_page_from_agp(intel_private
.i8xx_page
);
732 __free_page(intel_private
.i8xx_page
);
733 intel_private
.i8xx_page
= NULL
;
736 static void intel_i830_setup_flush(void)
738 /* return if we've already set the flush mechanism up */
739 if (intel_private
.i8xx_page
)
742 intel_private
.i8xx_page
= alloc_page(GFP_KERNEL
| __GFP_ZERO
| GFP_DMA32
);
743 if (!intel_private
.i8xx_page
)
746 intel_private
.i8xx_flush_page
= kmap(intel_private
.i8xx_page
);
747 if (!intel_private
.i8xx_flush_page
)
748 intel_i830_fini_flush();
751 /* The chipset_flush interface needs to get data that has already been
752 * flushed out of the CPU all the way out to main memory, because the GPU
753 * doesn't snoop those buffers.
755 * The 8xx series doesn't have the same lovely interface for flushing the
756 * chipset write buffers that the later chips do. According to the 865
757 * specs, it's 64 octwords, or 1KB. So, to get those previous things in
758 * that buffer out, we just fill 1KB and clflush it out, on the assumption
759 * that it'll push whatever was in there out. It appears to work.
761 static void intel_i830_chipset_flush(struct agp_bridge_data
*bridge
)
763 unsigned int *pg
= intel_private
.i8xx_flush_page
;
768 clflush_cache_range(pg
, 1024);
769 else if (wbinvd_on_all_cpus() != 0)
770 printk(KERN_ERR
"Timed out waiting for cache flush.\n");
773 /* The intel i830 automatically initializes the agp aperture during POST.
774 * Use the memory already set aside for in the GTT.
776 static int intel_i830_create_gatt_table(struct agp_bridge_data
*bridge
)
779 struct aper_size_info_fixed
*size
;
783 size
= agp_bridge
->current_size
;
784 page_order
= size
->page_order
;
785 num_entries
= size
->num_entries
;
786 agp_bridge
->gatt_table_real
= NULL
;
788 pci_read_config_dword(intel_private
.pcidev
, I810_MMADDR
, &temp
);
791 intel_private
.registers
= ioremap(temp
, 128 * 4096);
792 if (!intel_private
.registers
)
795 temp
= readl(intel_private
.registers
+I810_PGETBL_CTL
) & 0xfffff000;
796 global_cache_flush(); /* FIXME: ?? */
798 /* we have to call this as early as possible after the MMIO base address is known */
799 intel_i830_init_gtt_entries();
800 if (intel_private
.gtt_entries
== 0) {
801 iounmap(intel_private
.registers
);
805 agp_bridge
->gatt_table
= NULL
;
807 agp_bridge
->gatt_bus_addr
= temp
;
/* Return the gatt table to a sane state. Use the top of stolen
 * memory for the GTT.
 */
static int intel_i830_free_gatt_table(struct agp_bridge_data *bridge)
{
	/* nothing to free: the table lives in stolen memory owned by the BIOS */
	return 0;
}
820 static int intel_i830_fetch_size(void)
823 struct aper_size_info_fixed
*values
;
825 values
= A_SIZE_FIX(agp_bridge
->driver
->aperture_sizes
);
827 if (agp_bridge
->dev
->device
!= PCI_DEVICE_ID_INTEL_82830_HB
&&
828 agp_bridge
->dev
->device
!= PCI_DEVICE_ID_INTEL_82845G_HB
) {
829 /* 855GM/852GM/865G has 128MB aperture size */
830 agp_bridge
->current_size
= (void *) values
;
831 agp_bridge
->aperture_size_idx
= 0;
832 return values
[0].size
;
835 pci_read_config_word(agp_bridge
->dev
, I830_GMCH_CTRL
, &gmch_ctrl
);
837 if ((gmch_ctrl
& I830_GMCH_MEM_MASK
) == I830_GMCH_MEM_128M
) {
838 agp_bridge
->current_size
= (void *) values
;
839 agp_bridge
->aperture_size_idx
= 0;
840 return values
[0].size
;
842 agp_bridge
->current_size
= (void *) (values
+ 1);
843 agp_bridge
->aperture_size_idx
= 1;
844 return values
[1].size
;
850 static int intel_i830_configure(void)
852 struct aper_size_info_fixed
*current_size
;
857 current_size
= A_SIZE_FIX(agp_bridge
->current_size
);
859 pci_read_config_dword(intel_private
.pcidev
, I810_GMADDR
, &temp
);
860 agp_bridge
->gart_bus_addr
= (temp
& PCI_BASE_ADDRESS_MEM_MASK
);
862 pci_read_config_word(agp_bridge
->dev
, I830_GMCH_CTRL
, &gmch_ctrl
);
863 gmch_ctrl
|= I830_GMCH_ENABLED
;
864 pci_write_config_word(agp_bridge
->dev
, I830_GMCH_CTRL
, gmch_ctrl
);
866 writel(agp_bridge
->gatt_bus_addr
|I810_PGETBL_ENABLED
, intel_private
.registers
+I810_PGETBL_CTL
);
867 readl(intel_private
.registers
+I810_PGETBL_CTL
); /* PCI Posting. */
869 if (agp_bridge
->driver
->needs_scratch_page
) {
870 for (i
= intel_private
.gtt_entries
; i
< current_size
->num_entries
; i
++) {
871 writel(agp_bridge
->scratch_page
, intel_private
.registers
+I810_PTE_BASE
+(i
*4));
873 readl(intel_private
.registers
+I810_PTE_BASE
+((i
-1)*4)); /* PCI Posting. */
876 global_cache_flush();
878 intel_i830_setup_flush();
882 static void intel_i830_cleanup(void)
884 iounmap(intel_private
.registers
);
887 static int intel_i830_insert_entries(struct agp_memory
*mem
, off_t pg_start
,
890 int i
, j
, num_entries
;
895 if (mem
->page_count
== 0)
898 temp
= agp_bridge
->current_size
;
899 num_entries
= A_SIZE_FIX(temp
)->num_entries
;
901 if (pg_start
< intel_private
.gtt_entries
) {
902 dev_printk(KERN_DEBUG
, &intel_private
.pcidev
->dev
,
903 "pg_start == 0x%.8lx, intel_private.gtt_entries == 0x%.8x\n",
904 pg_start
, intel_private
.gtt_entries
);
906 dev_info(&intel_private
.pcidev
->dev
,
907 "trying to insert into local/stolen memory\n");
911 if ((pg_start
+ mem
->page_count
) > num_entries
)
914 /* The i830 can't check the GTT for entries since its read only,
915 * depend on the caller to make the correct offset decisions.
918 if (type
!= mem
->type
)
921 mask_type
= agp_bridge
->driver
->agp_type_to_mask_type(agp_bridge
, type
);
923 if (mask_type
!= 0 && mask_type
!= AGP_PHYS_MEMORY
&&
924 mask_type
!= INTEL_AGP_CACHED_MEMORY
)
927 if (!mem
->is_flushed
)
928 global_cache_flush();
930 for (i
= 0, j
= pg_start
; i
< mem
->page_count
; i
++, j
++) {
931 writel(agp_bridge
->driver
->mask_memory(agp_bridge
,
932 page_to_phys(mem
->pages
[i
]), mask_type
),
933 intel_private
.registers
+I810_PTE_BASE
+(j
*4));
935 readl(intel_private
.registers
+I810_PTE_BASE
+((j
-1)*4));
940 mem
->is_flushed
= true;
944 static int intel_i830_remove_entries(struct agp_memory
*mem
, off_t pg_start
,
949 if (mem
->page_count
== 0)
952 if (pg_start
< intel_private
.gtt_entries
) {
953 dev_info(&intel_private
.pcidev
->dev
,
954 "trying to disable local/stolen memory\n");
958 for (i
= pg_start
; i
< (mem
->page_count
+ pg_start
); i
++) {
959 writel(agp_bridge
->scratch_page
, intel_private
.registers
+I810_PTE_BASE
+(i
*4));
961 readl(intel_private
.registers
+I810_PTE_BASE
+((i
-1)*4));
966 static struct agp_memory
*intel_i830_alloc_by_type(size_t pg_count
, int type
)
968 if (type
== AGP_PHYS_MEMORY
)
969 return alloc_agpphysmem_i8xx(pg_count
, type
);
970 /* always return NULL for other allocation types for now */
974 static int intel_alloc_chipset_flush_resource(void)
977 ret
= pci_bus_alloc_resource(agp_bridge
->dev
->bus
, &intel_private
.ifp_resource
, PAGE_SIZE
,
978 PAGE_SIZE
, PCIBIOS_MIN_MEM
, 0,
979 pcibios_align_resource
, agp_bridge
->dev
);
984 static void intel_i915_setup_chipset_flush(void)
989 pci_read_config_dword(agp_bridge
->dev
, I915_IFPADDR
, &temp
);
991 intel_alloc_chipset_flush_resource();
992 intel_private
.resource_valid
= 1;
993 pci_write_config_dword(agp_bridge
->dev
, I915_IFPADDR
, (intel_private
.ifp_resource
.start
& 0xffffffff) | 0x1);
997 intel_private
.resource_valid
= 1;
998 intel_private
.ifp_resource
.start
= temp
;
999 intel_private
.ifp_resource
.end
= temp
+ PAGE_SIZE
;
1000 ret
= request_resource(&iomem_resource
, &intel_private
.ifp_resource
);
1001 /* some BIOSes reserve this area in a pnp some don't */
1003 intel_private
.resource_valid
= 0;
1007 static void intel_i965_g33_setup_chipset_flush(void)
1009 u32 temp_hi
, temp_lo
;
1012 pci_read_config_dword(agp_bridge
->dev
, I965_IFPADDR
+ 4, &temp_hi
);
1013 pci_read_config_dword(agp_bridge
->dev
, I965_IFPADDR
, &temp_lo
);
1015 if (!(temp_lo
& 0x1)) {
1017 intel_alloc_chipset_flush_resource();
1019 intel_private
.resource_valid
= 1;
1020 pci_write_config_dword(agp_bridge
->dev
, I965_IFPADDR
+ 4,
1021 upper_32_bits(intel_private
.ifp_resource
.start
));
1022 pci_write_config_dword(agp_bridge
->dev
, I965_IFPADDR
, (intel_private
.ifp_resource
.start
& 0xffffffff) | 0x1);
1027 l64
= ((u64
)temp_hi
<< 32) | temp_lo
;
1029 intel_private
.resource_valid
= 1;
1030 intel_private
.ifp_resource
.start
= l64
;
1031 intel_private
.ifp_resource
.end
= l64
+ PAGE_SIZE
;
1032 ret
= request_resource(&iomem_resource
, &intel_private
.ifp_resource
);
1033 /* some BIOSes reserve this area in a pnp some don't */
1035 intel_private
.resource_valid
= 0;
1039 static void intel_i9xx_setup_flush(void)
1041 /* return if already configured */
1042 if (intel_private
.ifp_resource
.start
)
1048 /* setup a resource for this object */
1049 intel_private
.ifp_resource
.name
= "Intel Flush Page";
1050 intel_private
.ifp_resource
.flags
= IORESOURCE_MEM
;
1052 /* Setup chipset flush for 915 */
1053 if (IS_I965
|| IS_G33
|| IS_G4X
) {
1054 intel_i965_g33_setup_chipset_flush();
1056 intel_i915_setup_chipset_flush();
1059 if (intel_private
.ifp_resource
.start
) {
1060 intel_private
.i9xx_flush_page
= ioremap_nocache(intel_private
.ifp_resource
.start
, PAGE_SIZE
);
1061 if (!intel_private
.i9xx_flush_page
)
1062 dev_info(&intel_private
.pcidev
->dev
, "can't ioremap flush page - no chipset flushing");
1066 static int intel_i9xx_configure(void)
1068 struct aper_size_info_fixed
*current_size
;
1073 current_size
= A_SIZE_FIX(agp_bridge
->current_size
);
1075 pci_read_config_dword(intel_private
.pcidev
, I915_GMADDR
, &temp
);
1077 agp_bridge
->gart_bus_addr
= (temp
& PCI_BASE_ADDRESS_MEM_MASK
);
1079 pci_read_config_word(agp_bridge
->dev
, I830_GMCH_CTRL
, &gmch_ctrl
);
1080 gmch_ctrl
|= I830_GMCH_ENABLED
;
1081 pci_write_config_word(agp_bridge
->dev
, I830_GMCH_CTRL
, gmch_ctrl
);
1083 writel(agp_bridge
->gatt_bus_addr
|I810_PGETBL_ENABLED
, intel_private
.registers
+I810_PGETBL_CTL
);
1084 readl(intel_private
.registers
+I810_PGETBL_CTL
); /* PCI Posting. */
1086 if (agp_bridge
->driver
->needs_scratch_page
) {
1087 for (i
= intel_private
.gtt_entries
; i
< intel_private
.gtt_total_size
; i
++) {
1088 writel(agp_bridge
->scratch_page
, intel_private
.gtt
+i
);
1090 readl(intel_private
.gtt
+i
-1); /* PCI Posting. */
1093 global_cache_flush();
1095 intel_i9xx_setup_flush();
1100 static void intel_i915_cleanup(void)
1102 if (intel_private
.i9xx_flush_page
)
1103 iounmap(intel_private
.i9xx_flush_page
);
1104 if (intel_private
.resource_valid
)
1105 release_resource(&intel_private
.ifp_resource
);
1106 intel_private
.ifp_resource
.start
= 0;
1107 intel_private
.resource_valid
= 0;
1108 iounmap(intel_private
.gtt
);
1109 iounmap(intel_private
.registers
);
1112 static void intel_i915_chipset_flush(struct agp_bridge_data
*bridge
)
1114 if (intel_private
.i9xx_flush_page
)
1115 writel(1, intel_private
.i9xx_flush_page
);
1118 static int intel_i915_insert_entries(struct agp_memory
*mem
, off_t pg_start
,
1126 if (mem
->page_count
== 0)
1129 temp
= agp_bridge
->current_size
;
1130 num_entries
= A_SIZE_FIX(temp
)->num_entries
;
1132 if (pg_start
< intel_private
.gtt_entries
) {
1133 dev_printk(KERN_DEBUG
, &intel_private
.pcidev
->dev
,
1134 "pg_start == 0x%.8lx, intel_private.gtt_entries == 0x%.8x\n",
1135 pg_start
, intel_private
.gtt_entries
);
1137 dev_info(&intel_private
.pcidev
->dev
,
1138 "trying to insert into local/stolen memory\n");
1142 if ((pg_start
+ mem
->page_count
) > num_entries
)
1145 /* The i915 can't check the GTT for entries since it's read only;
1146 * depend on the caller to make the correct offset decisions.
1149 if (type
!= mem
->type
)
1152 mask_type
= agp_bridge
->driver
->agp_type_to_mask_type(agp_bridge
, type
);
1154 if (mask_type
!= 0 && mask_type
!= AGP_PHYS_MEMORY
&&
1155 mask_type
!= INTEL_AGP_CACHED_MEMORY
)
1158 if (!mem
->is_flushed
)
1159 global_cache_flush();
1161 intel_agp_insert_sg_entries(mem
, pg_start
, mask_type
);
1166 mem
->is_flushed
= true;
1170 static int intel_i915_remove_entries(struct agp_memory
*mem
, off_t pg_start
,
1175 if (mem
->page_count
== 0)
1178 if (pg_start
< intel_private
.gtt_entries
) {
1179 dev_info(&intel_private
.pcidev
->dev
,
1180 "trying to disable local/stolen memory\n");
1184 for (i
= pg_start
; i
< (mem
->page_count
+ pg_start
); i
++)
1185 writel(agp_bridge
->scratch_page
, intel_private
.gtt
+i
);
1187 readl(intel_private
.gtt
+i
-1);
1192 /* Return the aperture size by just checking the resource length. The effect
1193 * described in the spec of the MSAC registers is just changing of the
1196 static int intel_i9xx_fetch_size(void)
1198 int num_sizes
= ARRAY_SIZE(intel_i830_sizes
);
1199 int aper_size
; /* size in megabytes */
1202 aper_size
= pci_resource_len(intel_private
.pcidev
, 2) / MB(1);
1204 for (i
= 0; i
< num_sizes
; i
++) {
1205 if (aper_size
== intel_i830_sizes
[i
].size
) {
1206 agp_bridge
->current_size
= intel_i830_sizes
+ i
;
1214 static int intel_i915_get_gtt_size(void)
1221 /* G33's GTT size defined in gmch_ctrl */
1222 pci_read_config_word(agp_bridge
->dev
, I830_GMCH_CTRL
, &gmch_ctrl
);
1223 switch (gmch_ctrl
& G33_PGETBL_SIZE_MASK
) {
1224 case G33_PGETBL_SIZE_1M
:
1227 case G33_PGETBL_SIZE_2M
:
1231 dev_info(&agp_bridge
->dev
->dev
,
1232 "unknown page table size 0x%x, assuming 512KB\n",
1233 (gmch_ctrl
& G33_PGETBL_SIZE_MASK
));
1237 /* On previous hardware, the GTT size was just what was
1238 * required to map the aperture.
1240 size
= agp_bridge
->driver
->fetch_size();
1246 /* The intel i915 automatically initializes the agp aperture during POST.
1247 * Use the memory already set aside for in the GTT.
1249 static int intel_i915_create_gatt_table(struct agp_bridge_data
*bridge
)
1252 struct aper_size_info_fixed
*size
;
1257 size
= agp_bridge
->current_size
;
1258 page_order
= size
->page_order
;
1259 num_entries
= size
->num_entries
;
1260 agp_bridge
->gatt_table_real
= NULL
;
1262 pci_read_config_dword(intel_private
.pcidev
, I915_MMADDR
, &temp
);
1263 pci_read_config_dword(intel_private
.pcidev
, I915_PTEADDR
, &temp2
);
1265 gtt_map_size
= intel_i915_get_gtt_size();
1267 intel_private
.gtt
= ioremap(temp2
, gtt_map_size
);
1268 if (!intel_private
.gtt
)
1271 intel_private
.gtt_total_size
= gtt_map_size
/ 4;
1275 intel_private
.registers
= ioremap(temp
, 128 * 4096);
1276 if (!intel_private
.registers
) {
1277 iounmap(intel_private
.gtt
);
1281 temp
= readl(intel_private
.registers
+I810_PGETBL_CTL
) & 0xfffff000;
1282 global_cache_flush(); /* FIXME: ? */
1284 /* we have to call this as early as possible after the MMIO base address is known */
1285 intel_i830_init_gtt_entries();
1286 if (intel_private
.gtt_entries
== 0) {
1287 iounmap(intel_private
.gtt
);
1288 iounmap(intel_private
.registers
);
1292 agp_bridge
->gatt_table
= NULL
;
1294 agp_bridge
->gatt_bus_addr
= temp
;
1300 * The i965 supports 36-bit physical addresses, but to keep
1301 * the format of the GTT the same, the bits that don't fit
1302 * in a 32-bit word are shifted down to bits 4..7.
1304 * Gcc is smart enough to notice that "(addr >> 28) & 0xf0"
1305 * is always zero on 32-bit architectures, so no need to make
1308 static unsigned long intel_i965_mask_memory(struct agp_bridge_data
*bridge
,
1309 dma_addr_t addr
, int type
)
1311 /* Shift high bits down */
1312 addr
|= (addr
>> 28) & 0xf0;
1314 /* Type checking must be done elsewhere */
1315 return addr
| bridge
->driver
->masks
[type
].mask
;
1318 static void intel_i965_get_gtt_range(int *gtt_offset
, int *gtt_size
)
1322 switch (agp_bridge
->dev
->device
) {
1323 case PCI_DEVICE_ID_INTEL_GM45_HB
:
1324 case PCI_DEVICE_ID_INTEL_EAGLELAKE_HB
:
1325 case PCI_DEVICE_ID_INTEL_Q45_HB
:
1326 case PCI_DEVICE_ID_INTEL_G45_HB
:
1327 case PCI_DEVICE_ID_INTEL_G41_HB
:
1328 case PCI_DEVICE_ID_INTEL_B43_HB
:
1329 case PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB
:
1330 case PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB
:
1331 case PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB
:
1332 case PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB
:
1333 *gtt_offset
= *gtt_size
= MB(2);
1335 case PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB
:
1336 case PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB
:
1337 *gtt_offset
= MB(2);
1339 pci_read_config_word(intel_private
.pcidev
, SNB_GMCH_CTRL
, &snb_gmch_ctl
);
1340 switch (snb_gmch_ctl
& SNB_GTT_SIZE_MASK
) {
1342 case SNB_GTT_SIZE_0M
:
1343 printk(KERN_ERR
"Bad GTT size mask: 0x%04x.\n", snb_gmch_ctl
);
1346 case SNB_GTT_SIZE_1M
:
1349 case SNB_GTT_SIZE_2M
:
1355 *gtt_offset
= *gtt_size
= KB(512);
1359 /* The intel i965 automatically initializes the agp aperture during POST.
1360 * Use the memory already set aside for in the GTT.
1362 static int intel_i965_create_gatt_table(struct agp_bridge_data
*bridge
)
1365 struct aper_size_info_fixed
*size
;
1368 int gtt_offset
, gtt_size
;
1370 size
= agp_bridge
->current_size
;
1371 page_order
= size
->page_order
;
1372 num_entries
= size
->num_entries
;
1373 agp_bridge
->gatt_table_real
= NULL
;
1375 pci_read_config_dword(intel_private
.pcidev
, I915_MMADDR
, &temp
);
1379 intel_i965_get_gtt_range(>t_offset
, >t_size
);
1381 intel_private
.gtt
= ioremap((temp
+ gtt_offset
) , gtt_size
);
1383 if (!intel_private
.gtt
)
1386 intel_private
.gtt_total_size
= gtt_size
/ 4;
1388 intel_private
.registers
= ioremap(temp
, 128 * 4096);
1389 if (!intel_private
.registers
) {
1390 iounmap(intel_private
.gtt
);
1394 temp
= readl(intel_private
.registers
+I810_PGETBL_CTL
) & 0xfffff000;
1395 global_cache_flush(); /* FIXME: ? */
1397 /* we have to call this as early as possible after the MMIO base address is known */
1398 intel_i830_init_gtt_entries();
1399 if (intel_private
.gtt_entries
== 0) {
1400 iounmap(intel_private
.gtt
);
1401 iounmap(intel_private
.registers
);
1405 agp_bridge
->gatt_table
= NULL
;
1407 agp_bridge
->gatt_bus_addr
= temp
;
1412 static const struct agp_bridge_driver intel_810_driver
= {
1413 .owner
= THIS_MODULE
,
1414 .aperture_sizes
= intel_i810_sizes
,
1415 .size_type
= FIXED_APER_SIZE
,
1416 .num_aperture_sizes
= 2,
1417 .needs_scratch_page
= true,
1418 .configure
= intel_i810_configure
,
1419 .fetch_size
= intel_i810_fetch_size
,
1420 .cleanup
= intel_i810_cleanup
,
1421 .mask_memory
= intel_i810_mask_memory
,
1422 .masks
= intel_i810_masks
,
1423 .agp_enable
= intel_i810_agp_enable
,
1424 .cache_flush
= global_cache_flush
,
1425 .create_gatt_table
= agp_generic_create_gatt_table
,
1426 .free_gatt_table
= agp_generic_free_gatt_table
,
1427 .insert_memory
= intel_i810_insert_entries
,
1428 .remove_memory
= intel_i810_remove_entries
,
1429 .alloc_by_type
= intel_i810_alloc_by_type
,
1430 .free_by_type
= intel_i810_free_by_type
,
1431 .agp_alloc_page
= agp_generic_alloc_page
,
1432 .agp_alloc_pages
= agp_generic_alloc_pages
,
1433 .agp_destroy_page
= agp_generic_destroy_page
,
1434 .agp_destroy_pages
= agp_generic_destroy_pages
,
1435 .agp_type_to_mask_type
= agp_generic_type_to_mask_type
,
1438 static const struct agp_bridge_driver intel_830_driver
= {
1439 .owner
= THIS_MODULE
,
1440 .aperture_sizes
= intel_i830_sizes
,
1441 .size_type
= FIXED_APER_SIZE
,
1442 .num_aperture_sizes
= 4,
1443 .needs_scratch_page
= true,
1444 .configure
= intel_i830_configure
,
1445 .fetch_size
= intel_i830_fetch_size
,
1446 .cleanup
= intel_i830_cleanup
,
1447 .mask_memory
= intel_i810_mask_memory
,
1448 .masks
= intel_i810_masks
,
1449 .agp_enable
= intel_i810_agp_enable
,
1450 .cache_flush
= global_cache_flush
,
1451 .create_gatt_table
= intel_i830_create_gatt_table
,
1452 .free_gatt_table
= intel_i830_free_gatt_table
,
1453 .insert_memory
= intel_i830_insert_entries
,
1454 .remove_memory
= intel_i830_remove_entries
,
1455 .alloc_by_type
= intel_i830_alloc_by_type
,
1456 .free_by_type
= intel_i810_free_by_type
,
1457 .agp_alloc_page
= agp_generic_alloc_page
,
1458 .agp_alloc_pages
= agp_generic_alloc_pages
,
1459 .agp_destroy_page
= agp_generic_destroy_page
,
1460 .agp_destroy_pages
= agp_generic_destroy_pages
,
1461 .agp_type_to_mask_type
= intel_i830_type_to_mask_type
,
1462 .chipset_flush
= intel_i830_chipset_flush
,
1465 static const struct agp_bridge_driver intel_915_driver
= {
1466 .owner
= THIS_MODULE
,
1467 .aperture_sizes
= intel_i830_sizes
,
1468 .size_type
= FIXED_APER_SIZE
,
1469 .num_aperture_sizes
= 4,
1470 .needs_scratch_page
= true,
1471 .configure
= intel_i9xx_configure
,
1472 .fetch_size
= intel_i9xx_fetch_size
,
1473 .cleanup
= intel_i915_cleanup
,
1474 .mask_memory
= intel_i810_mask_memory
,
1475 .masks
= intel_i810_masks
,
1476 .agp_enable
= intel_i810_agp_enable
,
1477 .cache_flush
= global_cache_flush
,
1478 .create_gatt_table
= intel_i915_create_gatt_table
,
1479 .free_gatt_table
= intel_i830_free_gatt_table
,
1480 .insert_memory
= intel_i915_insert_entries
,
1481 .remove_memory
= intel_i915_remove_entries
,
1482 .alloc_by_type
= intel_i830_alloc_by_type
,
1483 .free_by_type
= intel_i810_free_by_type
,
1484 .agp_alloc_page
= agp_generic_alloc_page
,
1485 .agp_alloc_pages
= agp_generic_alloc_pages
,
1486 .agp_destroy_page
= agp_generic_destroy_page
,
1487 .agp_destroy_pages
= agp_generic_destroy_pages
,
1488 .agp_type_to_mask_type
= intel_i830_type_to_mask_type
,
1489 .chipset_flush
= intel_i915_chipset_flush
,
1490 #ifdef USE_PCI_DMA_API
1491 .agp_map_page
= intel_agp_map_page
,
1492 .agp_unmap_page
= intel_agp_unmap_page
,
1493 .agp_map_memory
= intel_agp_map_memory
,
1494 .agp_unmap_memory
= intel_agp_unmap_memory
,
1498 static const struct agp_bridge_driver intel_i965_driver
= {
1499 .owner
= THIS_MODULE
,
1500 .aperture_sizes
= intel_i830_sizes
,
1501 .size_type
= FIXED_APER_SIZE
,
1502 .num_aperture_sizes
= 4,
1503 .needs_scratch_page
= true,
1504 .configure
= intel_i9xx_configure
,
1505 .fetch_size
= intel_i9xx_fetch_size
,
1506 .cleanup
= intel_i915_cleanup
,
1507 .mask_memory
= intel_i965_mask_memory
,
1508 .masks
= intel_i810_masks
,
1509 .agp_enable
= intel_i810_agp_enable
,
1510 .cache_flush
= global_cache_flush
,
1511 .create_gatt_table
= intel_i965_create_gatt_table
,
1512 .free_gatt_table
= intel_i830_free_gatt_table
,
1513 .insert_memory
= intel_i915_insert_entries
,
1514 .remove_memory
= intel_i915_remove_entries
,
1515 .alloc_by_type
= intel_i830_alloc_by_type
,
1516 .free_by_type
= intel_i810_free_by_type
,
1517 .agp_alloc_page
= agp_generic_alloc_page
,
1518 .agp_alloc_pages
= agp_generic_alloc_pages
,
1519 .agp_destroy_page
= agp_generic_destroy_page
,
1520 .agp_destroy_pages
= agp_generic_destroy_pages
,
1521 .agp_type_to_mask_type
= intel_i830_type_to_mask_type
,
1522 .chipset_flush
= intel_i915_chipset_flush
,
1523 #ifdef USE_PCI_DMA_API
1524 .agp_map_page
= intel_agp_map_page
,
1525 .agp_unmap_page
= intel_agp_unmap_page
,
1526 .agp_map_memory
= intel_agp_map_memory
,
1527 .agp_unmap_memory
= intel_agp_unmap_memory
,
1531 static const struct agp_bridge_driver intel_g33_driver
= {
1532 .owner
= THIS_MODULE
,
1533 .aperture_sizes
= intel_i830_sizes
,
1534 .size_type
= FIXED_APER_SIZE
,
1535 .num_aperture_sizes
= 4,
1536 .needs_scratch_page
= true,
1537 .configure
= intel_i9xx_configure
,
1538 .fetch_size
= intel_i9xx_fetch_size
,
1539 .cleanup
= intel_i915_cleanup
,
1540 .mask_memory
= intel_i965_mask_memory
,
1541 .masks
= intel_i810_masks
,
1542 .agp_enable
= intel_i810_agp_enable
,
1543 .cache_flush
= global_cache_flush
,
1544 .create_gatt_table
= intel_i915_create_gatt_table
,
1545 .free_gatt_table
= intel_i830_free_gatt_table
,
1546 .insert_memory
= intel_i915_insert_entries
,
1547 .remove_memory
= intel_i915_remove_entries
,
1548 .alloc_by_type
= intel_i830_alloc_by_type
,
1549 .free_by_type
= intel_i810_free_by_type
,
1550 .agp_alloc_page
= agp_generic_alloc_page
,
1551 .agp_alloc_pages
= agp_generic_alloc_pages
,
1552 .agp_destroy_page
= agp_generic_destroy_page
,
1553 .agp_destroy_pages
= agp_generic_destroy_pages
,
1554 .agp_type_to_mask_type
= intel_i830_type_to_mask_type
,
1555 .chipset_flush
= intel_i915_chipset_flush
,
1556 #ifdef USE_PCI_DMA_API
1557 .agp_map_page
= intel_agp_map_page
,
1558 .agp_unmap_page
= intel_agp_unmap_page
,
1559 .agp_map_memory
= intel_agp_map_memory
,
1560 .agp_unmap_memory
= intel_agp_unmap_memory
,