/*
 * Intel GTT (Graphics Translation Table) routines
 *
 * Caveat: This driver implements the linux agp interface, but this is far from
 * an agp driver! GTT support ended up here for purely historical reasons: The
 * old userspace intel graphics drivers needed an interface to map memory into
 * the GTT. And the drm provides a default interface for graphic devices sitting
 * on an agp port. So it made sense to fake the GTT support as an agp port to
 * avoid having to create a new api.
 *
 * With gem this does not make much sense anymore, it just needlessly
 * complicates the code. But as long as the old graphics stack is still
 * supported, it's stuck here.
 *
 * /fairy-tale-mode off
 */

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/pagemap.h>
#include <linux/agp_backend.h>
#include "intel-agp.h"
#include <linux/intel-gtt.h>
#include <drm/intel-gtt.h>

/*
 * If we have Intel graphics, we're not going to have anything other than
 * an Intel IOMMU. So make the correct use of the PCI DMA API contingent
 * on the Intel IOMMU support (CONFIG_DMAR).
 * Only newer chipsets need to bother with this, of course.
 */
#ifdef CONFIG_DMAR
#define USE_PCI_DMA_API 1
#endif

/* Max amount of stolen space, anything above will be returned to Linux */
int intel_max_stolen = 32 * 1024 * 1024;
EXPORT_SYMBOL(intel_max_stolen);

static const struct aper_size_info_fixed intel_i810_sizes[] =
{
        {64, 16384, 4},
        /* The 32M mode still requires a 64k gatt */
        {32, 8192, 4}
};

#define AGP_DCACHE_MEMORY       1
#define AGP_PHYS_MEMORY         2
#define INTEL_AGP_CACHED_MEMORY 3

static struct gatt_mask intel_i810_masks[] =
{
        {.mask = I810_PTE_VALID, .type = 0},
        {.mask = (I810_PTE_VALID | I810_PTE_LOCAL), .type = AGP_DCACHE_MEMORY},
        {.mask = I810_PTE_VALID, .type = 0},
        {.mask = I810_PTE_VALID | I830_PTE_SYSTEM_CACHED,
         .type = INTEL_AGP_CACHED_MEMORY}
};

#define INTEL_AGP_UNCACHED_MEMORY               0
#define INTEL_AGP_CACHED_MEMORY_LLC             1
#define INTEL_AGP_CACHED_MEMORY_LLC_GFDT        2
#define INTEL_AGP_CACHED_MEMORY_LLC_MLC         3
#define INTEL_AGP_CACHED_MEMORY_LLC_MLC_GFDT    4

static struct gatt_mask intel_gen6_masks[] =
{
        {.mask = I810_PTE_VALID | GEN6_PTE_UNCACHED,
         .type = INTEL_AGP_UNCACHED_MEMORY },
        {.mask = I810_PTE_VALID | GEN6_PTE_LLC,
         .type = INTEL_AGP_CACHED_MEMORY_LLC },
        {.mask = I810_PTE_VALID | GEN6_PTE_LLC | GEN6_PTE_GFDT,
         .type = INTEL_AGP_CACHED_MEMORY_LLC_GFDT },
        {.mask = I810_PTE_VALID | GEN6_PTE_LLC_MLC,
         .type = INTEL_AGP_CACHED_MEMORY_LLC_MLC },
        {.mask = I810_PTE_VALID | GEN6_PTE_LLC_MLC | GEN6_PTE_GFDT,
         .type = INTEL_AGP_CACHED_MEMORY_LLC_MLC_GFDT },
};

static struct _intel_private {
        struct intel_gtt base;
        struct pci_dev *pcidev; /* device one */
        struct pci_dev *bridge_dev;
        u8 __iomem *registers;
        u32 __iomem *gtt;       /* I915G */
        int num_dcache_entries;
        union {
                void __iomem *i9xx_flush_page;
                void *i8xx_flush_page;
        };
        struct page *i8xx_page;
        struct resource ifp_resource;
        int resource_valid;
} intel_private;

#ifdef USE_PCI_DMA_API
static int intel_agp_map_page(struct page *page, dma_addr_t *ret)
{
        *ret = pci_map_page(intel_private.pcidev, page, 0,
                            PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
        if (pci_dma_mapping_error(intel_private.pcidev, *ret))
                return -EINVAL;
        return 0;
}

static void intel_agp_unmap_page(struct page *page, dma_addr_t dma)
{
        pci_unmap_page(intel_private.pcidev, dma,
                       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
}

static void intel_agp_free_sglist(struct agp_memory *mem)
{
        struct sg_table st;

        st.sgl = mem->sg_list;
        st.orig_nents = st.nents = mem->page_count;

        sg_free_table(&st);

        mem->sg_list = NULL;
        mem->num_sg = 0;
}

static int intel_agp_map_memory(struct agp_memory *mem)
{
        struct sg_table st;
        struct scatterlist *sg;
        int i;

        DBG("try mapping %lu pages\n", (unsigned long)mem->page_count);

        if (sg_alloc_table(&st, mem->page_count, GFP_KERNEL))
                goto err;

        mem->sg_list = sg = st.sgl;

        for (i = 0 ; i < mem->page_count; i++, sg = sg_next(sg))
                sg_set_page(sg, mem->pages[i], PAGE_SIZE, 0);

        mem->num_sg = pci_map_sg(intel_private.pcidev, mem->sg_list,
                                 mem->page_count, PCI_DMA_BIDIRECTIONAL);
        if (unlikely(!mem->num_sg))
                goto err;

        return 0;

err:
        sg_free_table(&st);
        return -ENOMEM;
}

static void intel_agp_unmap_memory(struct agp_memory *mem)
{
        DBG("try unmapping %lu pages\n", (unsigned long)mem->page_count);

        pci_unmap_sg(intel_private.pcidev, mem->sg_list,
                     mem->page_count, PCI_DMA_BIDIRECTIONAL);
        intel_agp_free_sglist(mem);
}

static void intel_agp_insert_sg_entries(struct agp_memory *mem,
                                        off_t pg_start, int mask_type)
{
        struct scatterlist *sg;
        int i, j;

        j = pg_start;

        WARN_ON(!mem->num_sg);

        if (mem->num_sg == mem->page_count) {
                for_each_sg(mem->sg_list, sg, mem->page_count, i) {
                        writel(agp_bridge->driver->mask_memory(agp_bridge,
                                        sg_dma_address(sg), mask_type),
                                        intel_private.gtt+j);
                        j++;
                }
        } else {
                /* sg may merge pages, but we have to separate
                 * per-page addr for GTT */
                unsigned int len, m;

                for_each_sg(mem->sg_list, sg, mem->num_sg, i) {
                        len = sg_dma_len(sg) / PAGE_SIZE;
                        for (m = 0; m < len; m++) {
                                writel(agp_bridge->driver->mask_memory(agp_bridge,
                                                sg_dma_address(sg) + m * PAGE_SIZE,
                                                mask_type),
                                                intel_private.gtt+j);
                                j++;
                        }
                }
        }
        readl(intel_private.gtt+j-1);   /* PCI Posting. */
}

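/*
 * Illustrative sketch (not part of the driver): if the DMA layer coalesced
 * two 4KiB pages into one scatterlist entry with sg_dma_len(sg) == 8192 and
 * sg_dma_address(sg) == 0x1000000, the merged-sg loop above still emits two
 * PTEs, one per page:
 *
 *   writel(mask_memory(..., 0x1000000, mask_type), intel_private.gtt + j);
 *   writel(mask_memory(..., 0x1001000, mask_type), intel_private.gtt + j + 1);
 *
 * because each GTT entry can only map a single 4KiB page.
 */
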
#else

static void intel_agp_insert_sg_entries(struct agp_memory *mem,
                                        off_t pg_start, int mask_type)
{
        int i, j;

        for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
                writel(agp_bridge->driver->mask_memory(agp_bridge,
                                page_to_phys(mem->pages[i]), mask_type),
                                intel_private.gtt+j);
        }

        readl(intel_private.gtt+j-1);   /* PCI Posting. */
}

#endif

static int intel_i810_fetch_size(void)
{
        u32 smram_miscc;
        struct aper_size_info_fixed *values;

        pci_read_config_dword(intel_private.bridge_dev,
                              I810_SMRAM_MISCC, &smram_miscc);
        values = A_SIZE_FIX(agp_bridge->driver->aperture_sizes);

        if ((smram_miscc & I810_GMS) == I810_GMS_DISABLE) {
                dev_warn(&intel_private.bridge_dev->dev, "i810 is disabled\n");
                return 0;
        }
        if ((smram_miscc & I810_GFX_MEM_WIN_SIZE) == I810_GFX_MEM_WIN_32M) {
                agp_bridge->current_size = (void *) (values + 1);
                agp_bridge->aperture_size_idx = 1;
                return values[1].size;
        } else {
                agp_bridge->current_size = (void *) (values);
                agp_bridge->aperture_size_idx = 0;
                return values[0].size;
        }
}

static int intel_i810_configure(void)
{
        struct aper_size_info_fixed *current_size;
        u32 temp;
        int i;

        current_size = A_SIZE_FIX(agp_bridge->current_size);

        if (!intel_private.registers) {
                pci_read_config_dword(intel_private.pcidev, I810_MMADDR, &temp);
                temp &= 0xfff80000;

                intel_private.registers = ioremap(temp, 128 * 4096);
                if (!intel_private.registers) {
                        dev_err(&intel_private.pcidev->dev,
                                "can't remap memory\n");
                        return -ENOMEM;
                }
        }

        if ((readl(intel_private.registers+I810_DRAM_CTL)
                & I810_DRAM_ROW_0) == I810_DRAM_ROW_0_SDRAM) {
                /* This will need to be dynamically assigned */
                dev_info(&intel_private.pcidev->dev,
                         "detected 4MB dedicated video ram\n");
                intel_private.num_dcache_entries = 1024;
        }
        pci_read_config_dword(intel_private.pcidev, I810_GMADDR, &temp);
        agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
        writel(agp_bridge->gatt_bus_addr | I810_PGETBL_ENABLED, intel_private.registers+I810_PGETBL_CTL);
        readl(intel_private.registers+I810_PGETBL_CTL); /* PCI Posting. */

        if (agp_bridge->driver->needs_scratch_page) {
                for (i = 0; i < current_size->num_entries; i++) {
                        writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4));
                }
                readl(intel_private.registers+I810_PTE_BASE+((i-1)*4)); /* PCI posting. */
        }
        global_cache_flush();
        return 0;
}

static void intel_i810_cleanup(void)
{
        writel(0, intel_private.registers+I810_PGETBL_CTL);
        readl(intel_private.registers); /* PCI Posting. */
        iounmap(intel_private.registers);
}

static void intel_i810_agp_enable(struct agp_bridge_data *bridge, u32 mode)
{
        /* Exists to support ARGB cursors */
        return;
}

static struct page *i8xx_alloc_pages(void)
{
        struct page *page;

        page = alloc_pages(GFP_KERNEL | GFP_DMA32, 2);
        if (page == NULL)
                return NULL;

        if (set_pages_uc(page, 4) < 0) {
                set_pages_wb(page, 4);
                __free_pages(page, 2);
                return NULL;
        }
        get_page(page);
        atomic_inc(&agp_bridge->current_memory_agp);
        return page;
}

static void i8xx_destroy_pages(struct page *page)
{
        if (page == NULL)
                return;

        set_pages_wb(page, 4);
        put_page(page);
        __free_pages(page, 2);
        atomic_dec(&agp_bridge->current_memory_agp);
}

static int intel_i830_type_to_mask_type(struct agp_bridge_data *bridge,
                                        int type)
{
        if (type < AGP_USER_TYPES)
                return type;
        else if (type == AGP_USER_CACHED_MEMORY)
                return INTEL_AGP_CACHED_MEMORY;
        else
                return 0;
}

static int intel_gen6_type_to_mask_type(struct agp_bridge_data *bridge,
                                        int type)
{
        unsigned int type_mask = type & ~AGP_USER_CACHED_MEMORY_GFDT;
        unsigned int gfdt = type & AGP_USER_CACHED_MEMORY_GFDT;

        if (type_mask == AGP_USER_UNCACHED_MEMORY)
                return INTEL_AGP_UNCACHED_MEMORY;
        else if (type_mask == AGP_USER_CACHED_MEMORY_LLC_MLC)
                return gfdt ? INTEL_AGP_CACHED_MEMORY_LLC_MLC_GFDT :
                              INTEL_AGP_CACHED_MEMORY_LLC_MLC;
        else /* set 'normal'/'cached' to LLC by default */
                return gfdt ? INTEL_AGP_CACHED_MEMORY_LLC_GFDT :
                              INTEL_AGP_CACHED_MEMORY_LLC;
}

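/*
 * Illustrative example (not in the original source): a caller passing
 * type == (AGP_USER_CACHED_MEMORY | AGP_USER_CACHED_MEMORY_GFDT) ends up
 * with type_mask == AGP_USER_CACHED_MEMORY and gfdt != 0, so the function
 * falls through to the default branch above and returns
 * INTEL_AGP_CACHED_MEMORY_LLC_GFDT.
 */
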
static int intel_i810_insert_entries(struct agp_memory *mem, off_t pg_start,
                                     int type)
{
        int i, j, num_entries;
        void *temp;
        int ret = -EINVAL;
        int mask_type;

        if (mem->page_count == 0)
                goto out;

        temp = agp_bridge->current_size;
        num_entries = A_SIZE_FIX(temp)->num_entries;

        if ((pg_start + mem->page_count) > num_entries)
                goto out_err;

        for (j = pg_start; j < (pg_start + mem->page_count); j++) {
                if (!PGE_EMPTY(agp_bridge, readl(agp_bridge->gatt_table+j))) {
                        ret = -EBUSY;
                        goto out_err;
                }
        }

        if (type != mem->type)
                goto out_err;

        mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type);

        switch (mask_type) {
        case AGP_DCACHE_MEMORY:
                if (!mem->is_flushed)
                        global_cache_flush();
                for (i = pg_start; i < (pg_start + mem->page_count); i++) {
                        writel((i*4096)|I810_PTE_LOCAL|I810_PTE_VALID,
                               intel_private.registers+I810_PTE_BASE+(i*4));
                }
                readl(intel_private.registers+I810_PTE_BASE+((i-1)*4));
                break;
        case AGP_PHYS_MEMORY:
        case AGP_NORMAL_MEMORY:
                if (!mem->is_flushed)
                        global_cache_flush();
                for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
                        writel(agp_bridge->driver->mask_memory(agp_bridge,
                                        page_to_phys(mem->pages[i]), mask_type),
                               intel_private.registers+I810_PTE_BASE+(j*4));
                }
                readl(intel_private.registers+I810_PTE_BASE+((j-1)*4));
                break;
        default:
                goto out_err;
        }

out:
        ret = 0;
out_err:
        mem->is_flushed = true;
        return ret;
}

static int intel_i810_remove_entries(struct agp_memory *mem, off_t pg_start,
                                     int type)
{
        int i;

        if (mem->page_count == 0)
                return 0;

        for (i = pg_start; i < (mem->page_count + pg_start); i++) {
                writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4));
        }
        readl(intel_private.registers+I810_PTE_BASE+((i-1)*4));

        return 0;
}

/*
 * The i810/i830 requires a physical address to program its mouse
 * pointer into hardware.
 * However the Xserver still writes to it through the agp aperture.
 */
static struct agp_memory *alloc_agpphysmem_i8xx(size_t pg_count, int type)
{
        struct agp_memory *new;
        struct page *page;

        switch (pg_count) {
        case 1: page = agp_bridge->driver->agp_alloc_page(agp_bridge);
                break;
        case 4:
                /* kludge to get 4 physical pages for ARGB cursor */
                page = i8xx_alloc_pages();
                break;
        default:
                return NULL;
        }

        if (page == NULL)
                return NULL;

        new = agp_create_memory(pg_count);
        if (new == NULL)
                return NULL;

        new->pages[0] = page;
        if (pg_count == 4) {
                /* kludge to get 4 physical pages for ARGB cursor */
                new->pages[1] = new->pages[0] + 1;
                new->pages[2] = new->pages[1] + 1;
                new->pages[3] = new->pages[2] + 1;
        }
        new->page_count = pg_count;
        new->num_scratch_pages = pg_count;
        new->type = AGP_PHYS_MEMORY;
        new->physical = page_to_phys(new->pages[0]);
        return new;
}

static struct agp_memory *intel_i810_alloc_by_type(size_t pg_count, int type)
{
        struct agp_memory *new;

        if (type == AGP_DCACHE_MEMORY) {
                if (pg_count != intel_private.num_dcache_entries)
                        return NULL;

                new = agp_create_memory(1);
                if (new == NULL)
                        return NULL;

                new->type = AGP_DCACHE_MEMORY;
                new->page_count = pg_count;
                new->num_scratch_pages = 0;
                agp_free_page_array(new);
                return new;
        }
        if (type == AGP_PHYS_MEMORY)
                return alloc_agpphysmem_i8xx(pg_count, type);
        return NULL;
}

static void intel_i810_free_by_type(struct agp_memory *curr)
{
        agp_free_key(curr->key);
        if (curr->type == AGP_PHYS_MEMORY) {
                if (curr->page_count == 4)
                        i8xx_destroy_pages(curr->pages[0]);
                else {
                        agp_bridge->driver->agp_destroy_page(curr->pages[0],
                                                             AGP_PAGE_DESTROY_UNMAP);
                        agp_bridge->driver->agp_destroy_page(curr->pages[0],
                                                             AGP_PAGE_DESTROY_FREE);
                }
                agp_free_page_array(curr);
        }
        kfree(curr);
}

static unsigned long intel_i810_mask_memory(struct agp_bridge_data *bridge,
                                            dma_addr_t addr, int type)
{
        /* Type checking must be done elsewhere */
        return addr | bridge->driver->masks[type].mask;
}

static struct aper_size_info_fixed intel_i830_sizes[] =
{
        {128, 32768, 5},
        /* The 64M mode still requires a 128k gatt */
        {64, 16384, 5},
        {256, 65536, 6},
        {512, 131072, 7},
};

static unsigned int intel_gtt_stolen_entries(void)
{
        u16 gmch_ctrl;
        u8 rdct;
        int local = 0;
        static const int ddt[4] = { 0, 16, 32, 64 };
        int size; /* reserved space (in kb) at the top of stolen memory */
        unsigned int overhead_entries, stolen_entries;
        unsigned int stolen_size = 0;

        pci_read_config_word(intel_private.bridge_dev,
                             I830_GMCH_CTRL, &gmch_ctrl);

        if (IS_I965) {
                u32 pgetbl_ctl;
                pgetbl_ctl = readl(intel_private.registers+I810_PGETBL_CTL);

                /* The 965 has a field telling us the size of the GTT,
                 * which may be larger than what is necessary to map the
                 * aperture.
                 */
                switch (pgetbl_ctl & I965_PGETBL_SIZE_MASK) {
                case I965_PGETBL_SIZE_128KB:
                        size = 128;
                        break;
                case I965_PGETBL_SIZE_256KB:
                        size = 256;
                        break;
                case I965_PGETBL_SIZE_512KB:
                        size = 512;
                        break;
                case I965_PGETBL_SIZE_1MB:
                        size = 1024;
                        break;
                case I965_PGETBL_SIZE_2MB:
                        size = 2048;
                        break;
                case I965_PGETBL_SIZE_1_5MB:
                        size = 1024 + 512;
                        break;
                default:
                        dev_info(&intel_private.pcidev->dev,
                                 "unknown page table size, assuming 512KB\n");
                        size = 512;
                }
                size += 4; /* add in BIOS popup space */
        } else if (IS_G33 && !IS_PINEVIEW) {
                /* G33's GTT size defined in gmch_ctrl */
                switch (gmch_ctrl & G33_PGETBL_SIZE_MASK) {
                case G33_PGETBL_SIZE_1M:
                        size = 1024;
                        break;
                case G33_PGETBL_SIZE_2M:
                        size = 2048;
                        break;
                default:
                        dev_info(&intel_private.bridge_dev->dev,
                                 "unknown page table size 0x%x, assuming 512KB\n",
                                 (gmch_ctrl & G33_PGETBL_SIZE_MASK));
                        size = 512;
                }
                size += 4;
        } else if (IS_G4X || IS_PINEVIEW) {
                /* On 4 series hardware, GTT stolen is separate from graphics
                 * stolen, ignore it in stolen gtt entries counting.  However,
                 * 4KB of the stolen memory doesn't get mapped to the GTT.
                 */
                size = 4;
        } else {
                /* On previous hardware, the GTT size was just what was
                 * required to map the aperture.
                 */
                size = agp_bridge->driver->fetch_size() + 4;
        }

        overhead_entries = size/4;

        if (intel_private.bridge_dev->device == PCI_DEVICE_ID_INTEL_82830_HB ||
            intel_private.bridge_dev->device == PCI_DEVICE_ID_INTEL_82845G_HB) {
                switch (gmch_ctrl & I830_GMCH_GMS_MASK) {
                case I830_GMCH_GMS_STOLEN_512:
                        stolen_size = KB(512);
                        break;
                case I830_GMCH_GMS_STOLEN_1024:
                        stolen_size = MB(1);
                        break;
                case I830_GMCH_GMS_STOLEN_8192:
                        stolen_size = MB(8);
                        break;
                case I830_GMCH_GMS_LOCAL:
                        rdct = readb(intel_private.registers+I830_RDRAM_CHANNEL_TYPE);
                        stolen_size = (I830_RDRAM_ND(rdct) + 1) *
                                        MB(ddt[I830_RDRAM_DDT(rdct)]);
                        local = 1;
                        break;
                default:
                        stolen_size = 0;
                        break;
                }
        } else if (IS_SNB) {
                /*
                 * SandyBridge has new memory control reg at 0x50.w
                 */
                u16 snb_gmch_ctl;
                pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl);
                switch (snb_gmch_ctl & SNB_GMCH_GMS_STOLEN_MASK) {
                case SNB_GMCH_GMS_STOLEN_32M:
                        stolen_size = MB(32);
                        break;
                case SNB_GMCH_GMS_STOLEN_64M:
                        stolen_size = MB(64);
                        break;
                case SNB_GMCH_GMS_STOLEN_96M:
                        stolen_size = MB(96);
                        break;
                case SNB_GMCH_GMS_STOLEN_128M:
                        stolen_size = MB(128);
                        break;
                case SNB_GMCH_GMS_STOLEN_160M:
                        stolen_size = MB(160);
                        break;
                case SNB_GMCH_GMS_STOLEN_192M:
                        stolen_size = MB(192);
                        break;
                case SNB_GMCH_GMS_STOLEN_224M:
                        stolen_size = MB(224);
                        break;
                case SNB_GMCH_GMS_STOLEN_256M:
                        stolen_size = MB(256);
                        break;
                case SNB_GMCH_GMS_STOLEN_288M:
                        stolen_size = MB(288);
                        break;
                case SNB_GMCH_GMS_STOLEN_320M:
                        stolen_size = MB(320);
                        break;
                case SNB_GMCH_GMS_STOLEN_352M:
                        stolen_size = MB(352);
                        break;
                case SNB_GMCH_GMS_STOLEN_384M:
                        stolen_size = MB(384);
                        break;
                case SNB_GMCH_GMS_STOLEN_416M:
                        stolen_size = MB(416);
                        break;
                case SNB_GMCH_GMS_STOLEN_448M:
                        stolen_size = MB(448);
                        break;
                case SNB_GMCH_GMS_STOLEN_480M:
                        stolen_size = MB(480);
                        break;
                case SNB_GMCH_GMS_STOLEN_512M:
                        stolen_size = MB(512);
                        break;
                }
        } else {
                switch (gmch_ctrl & I855_GMCH_GMS_MASK) {
                case I855_GMCH_GMS_STOLEN_1M:
                        stolen_size = MB(1);
                        break;
                case I855_GMCH_GMS_STOLEN_4M:
                        stolen_size = MB(4);
                        break;
                case I855_GMCH_GMS_STOLEN_8M:
                        stolen_size = MB(8);
                        break;
                case I855_GMCH_GMS_STOLEN_16M:
                        stolen_size = MB(16);
                        break;
                case I855_GMCH_GMS_STOLEN_32M:
                        stolen_size = MB(32);
                        break;
                case I915_GMCH_GMS_STOLEN_48M:
                        stolen_size = MB(48);
                        break;
                case I915_GMCH_GMS_STOLEN_64M:
                        stolen_size = MB(64);
                        break;
                case G33_GMCH_GMS_STOLEN_128M:
                        stolen_size = MB(128);
                        break;
                case G33_GMCH_GMS_STOLEN_256M:
                        stolen_size = MB(256);
                        break;
                case INTEL_GMCH_GMS_STOLEN_96M:
                        stolen_size = MB(96);
                        break;
                case INTEL_GMCH_GMS_STOLEN_160M:
                        stolen_size = MB(160);
                        break;
                case INTEL_GMCH_GMS_STOLEN_224M:
                        stolen_size = MB(224);
                        break;
                case INTEL_GMCH_GMS_STOLEN_352M:
                        stolen_size = MB(352);
                        break;
                default:
                        stolen_size = 0;
                        break;
                }
        }

        if (!local && stolen_size > intel_max_stolen) {
                dev_info(&intel_private.bridge_dev->dev,
                         "detected %dK stolen memory, trimming to %dK\n",
                         stolen_size / KB(1), intel_max_stolen / KB(1));
                stolen_size = intel_max_stolen;
        } else if (stolen_size > 0) {
                dev_info(&intel_private.bridge_dev->dev, "detected %dK %s memory\n",
                         stolen_size / KB(1), local ? "local" : "stolen");
        } else {
                dev_info(&intel_private.bridge_dev->dev,
                         "no pre-allocated video memory detected\n");
                stolen_size = 0;
        }

        stolen_entries = stolen_size/KB(4) - overhead_entries;

        return stolen_entries;
}

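/*
 * Worked example for intel_gtt_stolen_entries() above (illustrative numbers,
 * not from any datasheet): on a 965 with a 512KB GTT and 8MB of stolen
 * memory, size = 512 + 4 = 516, so overhead_entries = 516/4 = 129 and
 * stolen_entries = MB(8)/KB(4) - 129 = 2048 - 129 = 1919 usable entries.
 */
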
static unsigned int intel_gtt_mappable_entries(void)
{
        unsigned int aperture_size;
        u16 gmch_ctrl;

        aperture_size = 1024 * 1024;

        pci_read_config_word(intel_private.bridge_dev,
                             I830_GMCH_CTRL, &gmch_ctrl);

        switch (intel_private.pcidev->device) {
        case PCI_DEVICE_ID_INTEL_82830_CGC:
        case PCI_DEVICE_ID_INTEL_82845G_IG:
        case PCI_DEVICE_ID_INTEL_82855GM_IG:
        case PCI_DEVICE_ID_INTEL_82865_IG:
                if ((gmch_ctrl & I830_GMCH_MEM_MASK) == I830_GMCH_MEM_64M)
                        aperture_size *= 64;
                else
                        aperture_size *= 128;
                break;
        default:
                /* 9xx supports large sizes, just look at the length */
                aperture_size = pci_resource_len(intel_private.pcidev, 2);
                break;
        }

        return aperture_size >> PAGE_SHIFT;
}

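/*
 * Illustrative example: on a 9xx part with a 256MB graphics aperture BAR,
 * pci_resource_len() returns 256*1024*1024, so the function above yields
 * 256MB >> PAGE_SHIFT = 65536 mappable GTT entries.
 */
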
static int intel_gtt_init(void)
{
        /* we have to call this as early as possible after the MMIO base address is known */
        intel_private.base.gtt_stolen_entries = intel_gtt_stolen_entries();
        if (intel_private.base.gtt_stolen_entries == 0) {
                iounmap(intel_private.registers);
                return -ENOMEM;
        }

        return 0;
}

static int intel_fake_agp_fetch_size(void)
{
        unsigned int aper_size;
        int i;
        int num_sizes = ARRAY_SIZE(intel_i830_sizes);

        aper_size = (intel_private.base.gtt_mappable_entries << PAGE_SHIFT)
                    / MB(1);

        for (i = 0; i < num_sizes; i++) {
                if (aper_size == intel_i830_sizes[i].size) {
                        agp_bridge->current_size = intel_i830_sizes + i;
                        return aper_size;
                }
        }

        return 0;
}

static void intel_i830_fini_flush(void)
{
        kunmap(intel_private.i8xx_page);
        intel_private.i8xx_flush_page = NULL;
        unmap_page_from_agp(intel_private.i8xx_page);

        __free_page(intel_private.i8xx_page);
        intel_private.i8xx_page = NULL;
}

static void intel_i830_setup_flush(void)
{
        /* return if we've already set the flush mechanism up */
        if (intel_private.i8xx_page)
                return;

        intel_private.i8xx_page = alloc_page(GFP_KERNEL | __GFP_ZERO | GFP_DMA32);
        if (!intel_private.i8xx_page)
                return;

        intel_private.i8xx_flush_page = kmap(intel_private.i8xx_page);
        if (!intel_private.i8xx_flush_page)
                intel_i830_fini_flush();
}

/* The chipset_flush interface needs to get data that has already been
 * flushed out of the CPU all the way out to main memory, because the GPU
 * doesn't snoop those buffers.
 *
 * The 8xx series doesn't have the same lovely interface for flushing the
 * chipset write buffers that the later chips do. According to the 865
 * specs, it's 64 octwords, or 1KB. So, to get those previous things in
 * that buffer out, we just fill 1KB and clflush it out, on the assumption
 * that it'll push whatever was in there out. It appears to work.
 */
static void intel_i830_chipset_flush(struct agp_bridge_data *bridge)
{
        unsigned int *pg = intel_private.i8xx_flush_page;

        memset(pg, 0, 1024);

        if (cpu_has_clflush)
                clflush_cache_range(pg, 1024);
        else if (wbinvd_on_all_cpus() != 0)
                printk(KERN_ERR "Timed out waiting for cache flush.\n");
}

/* The intel i830 automatically initializes the agp aperture during POST.
 * Use the memory already set aside for it in the GTT.
 */
static int intel_i830_create_gatt_table(struct agp_bridge_data *bridge)
{
        int page_order;
        struct aper_size_info_fixed *size;
        int num_entries;
        u32 temp;
        int ret;

        size = agp_bridge->current_size;
        page_order = size->page_order;
        num_entries = size->num_entries;
        agp_bridge->gatt_table_real = NULL;

        pci_read_config_dword(intel_private.pcidev, I810_MMADDR, &temp);
        temp &= 0xfff80000;

        intel_private.registers = ioremap(temp, 128 * 4096);
        if (!intel_private.registers)
                return -ENOMEM;

        temp = readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000;
        global_cache_flush();   /* FIXME: ?? */

        ret = intel_gtt_init();
        if (ret != 0)
                return ret;

        agp_bridge->gatt_table = NULL;

        agp_bridge->gatt_bus_addr = temp;

        return 0;
}

/* Return the gatt table to a sane state. Use the top of stolen
 * memory for the GTT.
 */
static int intel_i830_free_gatt_table(struct agp_bridge_data *bridge)
{
        return 0;
}

static int intel_i830_configure(void)
{
        struct aper_size_info_fixed *current_size;
        u32 temp;
        u16 gmch_ctrl;
        int i;

        current_size = A_SIZE_FIX(agp_bridge->current_size);

        pci_read_config_dword(intel_private.pcidev, I810_GMADDR, &temp);
        agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);

        pci_read_config_word(intel_private.bridge_dev, I830_GMCH_CTRL, &gmch_ctrl);
        gmch_ctrl |= I830_GMCH_ENABLED;
        pci_write_config_word(intel_private.bridge_dev, I830_GMCH_CTRL, gmch_ctrl);

        writel(agp_bridge->gatt_bus_addr|I810_PGETBL_ENABLED, intel_private.registers+I810_PGETBL_CTL);
        readl(intel_private.registers+I810_PGETBL_CTL); /* PCI Posting. */

        if (agp_bridge->driver->needs_scratch_page) {
                for (i = intel_private.base.gtt_stolen_entries; i < current_size->num_entries; i++) {
                        writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4));
                }
                readl(intel_private.registers+I810_PTE_BASE+((i-1)*4)); /* PCI Posting. */
        }

        global_cache_flush();

        intel_i830_setup_flush();
        return 0;
}

static void intel_i830_cleanup(void)
{
        iounmap(intel_private.registers);
}

static int intel_i830_insert_entries(struct agp_memory *mem, off_t pg_start,
                                     int type)
{
        int i, j, num_entries;
        void *temp;
        int ret = -EINVAL;
        int mask_type;

        if (mem->page_count == 0)
                goto out;

        temp = agp_bridge->current_size;
        num_entries = A_SIZE_FIX(temp)->num_entries;

        if (pg_start < intel_private.base.gtt_stolen_entries) {
                dev_printk(KERN_DEBUG, &intel_private.pcidev->dev,
                           "pg_start == 0x%.8lx, gtt_stolen_entries == 0x%.8x\n",
                           pg_start, intel_private.base.gtt_stolen_entries);

                dev_info(&intel_private.pcidev->dev,
                         "trying to insert into local/stolen memory\n");
                goto out_err;
        }

        if ((pg_start + mem->page_count) > num_entries)
                goto out_err;

        /* The i830 can't check the GTT for entries since it's read only;
         * depend on the caller to make the correct offset decisions.
         */

        if (type != mem->type)
                goto out_err;

        mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type);

        if (mask_type != 0 && mask_type != AGP_PHYS_MEMORY &&
            mask_type != INTEL_AGP_CACHED_MEMORY)
                goto out_err;

        if (!mem->is_flushed)
                global_cache_flush();

        for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
                writel(agp_bridge->driver->mask_memory(agp_bridge,
                                page_to_phys(mem->pages[i]), mask_type),
                       intel_private.registers+I810_PTE_BASE+(j*4));
        }
        readl(intel_private.registers+I810_PTE_BASE+((j-1)*4));

out:
        ret = 0;
out_err:
        mem->is_flushed = true;
        return ret;
}

static int intel_i830_remove_entries(struct agp_memory *mem, off_t pg_start,
                                     int type)
{
        int i;

        if (mem->page_count == 0)
                return 0;

        if (pg_start < intel_private.base.gtt_stolen_entries) {
                dev_info(&intel_private.pcidev->dev,
                         "trying to disable local/stolen memory\n");
                return -EINVAL;
        }

        for (i = pg_start; i < (mem->page_count + pg_start); i++) {
                writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4));
        }
        readl(intel_private.registers+I810_PTE_BASE+((i-1)*4));

        return 0;
}

static struct agp_memory *intel_i830_alloc_by_type(size_t pg_count, int type)
{
        if (type == AGP_PHYS_MEMORY)
                return alloc_agpphysmem_i8xx(pg_count, type);
        /* always return NULL for other allocation types for now */
        return NULL;
}

static int intel_alloc_chipset_flush_resource(void)
{
        int ret;

        ret = pci_bus_alloc_resource(intel_private.bridge_dev->bus, &intel_private.ifp_resource, PAGE_SIZE,
                                     PAGE_SIZE, PCIBIOS_MIN_MEM, 0,
                                     pcibios_align_resource, intel_private.bridge_dev);

        return ret;
}

static void intel_i915_setup_chipset_flush(void)
{
        int ret;
        u32 temp;

        pci_read_config_dword(intel_private.bridge_dev, I915_IFPADDR, &temp);
        if (!(temp & 0x1)) {
                intel_alloc_chipset_flush_resource();
                intel_private.resource_valid = 1;
                pci_write_config_dword(intel_private.bridge_dev, I915_IFPADDR, (intel_private.ifp_resource.start & 0xffffffff) | 0x1);
        } else {
                temp &= ~1;

                intel_private.resource_valid = 1;
                intel_private.ifp_resource.start = temp;
                intel_private.ifp_resource.end = temp + PAGE_SIZE;
                ret = request_resource(&iomem_resource, &intel_private.ifp_resource);
                /* some BIOSes reserve this area in a pnp some don't */
                if (ret)
                        intel_private.resource_valid = 0;
        }
}

static void intel_i965_g33_setup_chipset_flush(void)
{
        u32 temp_hi, temp_lo;
        int ret;

        pci_read_config_dword(intel_private.bridge_dev, I965_IFPADDR + 4, &temp_hi);
        pci_read_config_dword(intel_private.bridge_dev, I965_IFPADDR, &temp_lo);

        if (!(temp_lo & 0x1)) {

                intel_alloc_chipset_flush_resource();

                intel_private.resource_valid = 1;
                pci_write_config_dword(intel_private.bridge_dev, I965_IFPADDR + 4,
                        upper_32_bits(intel_private.ifp_resource.start));
                pci_write_config_dword(intel_private.bridge_dev, I965_IFPADDR, (intel_private.ifp_resource.start & 0xffffffff) | 0x1);
        } else {
                u64 l64;

                temp_lo &= ~0x1;
                l64 = ((u64)temp_hi << 32) | temp_lo;

                intel_private.resource_valid = 1;
                intel_private.ifp_resource.start = l64;
                intel_private.ifp_resource.end = l64 + PAGE_SIZE;
                ret = request_resource(&iomem_resource, &intel_private.ifp_resource);
                /* some BIOSes reserve this area in a pnp some don't */
                if (ret)
                        intel_private.resource_valid = 0;
        }
}

static void intel_i9xx_setup_flush(void)
{
        /* return if already configured */
        if (intel_private.ifp_resource.start)
                return;

        if (IS_SNB)
                return;

        /* setup a resource for this object */
        intel_private.ifp_resource.name = "Intel Flush Page";
        intel_private.ifp_resource.flags = IORESOURCE_MEM;

        /* Setup chipset flush for 915 */
        if (IS_I965 || IS_G33 || IS_G4X) {
                intel_i965_g33_setup_chipset_flush();
        } else {
                intel_i915_setup_chipset_flush();
        }

        if (intel_private.ifp_resource.start)
                intel_private.i9xx_flush_page = ioremap_nocache(intel_private.ifp_resource.start, PAGE_SIZE);
        if (!intel_private.i9xx_flush_page)
                dev_err(&intel_private.pcidev->dev,
                        "can't ioremap flush page - no chipset flushing\n");
}

static int intel_i9xx_configure(void)
{
        struct aper_size_info_fixed *current_size;
        u32 temp;
        u16 gmch_ctrl;
        int i;

        current_size = A_SIZE_FIX(agp_bridge->current_size);

        pci_read_config_dword(intel_private.pcidev, I915_GMADDR, &temp);

        agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);

        pci_read_config_word(intel_private.bridge_dev, I830_GMCH_CTRL, &gmch_ctrl);
        gmch_ctrl |= I830_GMCH_ENABLED;
        pci_write_config_word(intel_private.bridge_dev, I830_GMCH_CTRL, gmch_ctrl);

        writel(agp_bridge->gatt_bus_addr|I810_PGETBL_ENABLED, intel_private.registers+I810_PGETBL_CTL);
        readl(intel_private.registers+I810_PGETBL_CTL); /* PCI Posting. */

        if (agp_bridge->driver->needs_scratch_page) {
                for (i = intel_private.base.gtt_stolen_entries; i <
                                intel_private.base.gtt_total_entries; i++) {
                        writel(agp_bridge->scratch_page, intel_private.gtt+i);
                }
                readl(intel_private.gtt+i-1);   /* PCI Posting. */
        }

        global_cache_flush();

        intel_i9xx_setup_flush();

        return 0;
}

static void intel_i915_cleanup(void)
{
        if (intel_private.i9xx_flush_page)
                iounmap(intel_private.i9xx_flush_page);
        if (intel_private.resource_valid)
                release_resource(&intel_private.ifp_resource);
        intel_private.ifp_resource.start = 0;
        intel_private.resource_valid = 0;
        iounmap(intel_private.gtt);
        iounmap(intel_private.registers);
}

static void intel_i915_chipset_flush(struct agp_bridge_data *bridge)
{
        if (intel_private.i9xx_flush_page)
                writel(1, intel_private.i9xx_flush_page);
}

static int intel_i915_insert_entries(struct agp_memory *mem, off_t pg_start,
                                     int type)
{
        int num_entries;
        void *temp;
        int ret = -EINVAL;
        int mask_type;

        if (mem->page_count == 0)
                goto out;

        temp = agp_bridge->current_size;
        num_entries = A_SIZE_FIX(temp)->num_entries;

        if (pg_start < intel_private.base.gtt_stolen_entries) {
                dev_printk(KERN_DEBUG, &intel_private.pcidev->dev,
                           "pg_start == 0x%.8lx, gtt_stolen_entries == 0x%.8x\n",
                           pg_start, intel_private.base.gtt_stolen_entries);

                dev_info(&intel_private.pcidev->dev,
                         "trying to insert into local/stolen memory\n");
                goto out_err;
        }

        if ((pg_start + mem->page_count) > num_entries)
                goto out_err;

        /* The i915 can't check the GTT for entries since it's read only;
         * depend on the caller to make the correct offset decisions.
         */

        if (type != mem->type)
                goto out_err;

        mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type);

        if (!IS_SNB && mask_type != 0 && mask_type != AGP_PHYS_MEMORY &&
            mask_type != INTEL_AGP_CACHED_MEMORY)
                goto out_err;

        if (!mem->is_flushed)
                global_cache_flush();

        intel_agp_insert_sg_entries(mem, pg_start, mask_type);

out:
        ret = 0;
out_err:
        mem->is_flushed = true;
        return ret;
}

static int intel_i915_remove_entries(struct agp_memory *mem, off_t pg_start,
                                     int type)
{
        int i;

        if (mem->page_count == 0)
                return 0;

        if (pg_start < intel_private.base.gtt_stolen_entries) {
                dev_info(&intel_private.pcidev->dev,
                         "trying to disable local/stolen memory\n");
                return -EINVAL;
        }

        for (i = pg_start; i < (mem->page_count + pg_start); i++)
                writel(agp_bridge->scratch_page, intel_private.gtt+i);

        readl(intel_private.gtt+i-1);

        return 0;
}

/* Return the aperture size by just checking the resource length.  The effect
 * described in the spec of the MSAC registers is just changing of the
 * resource size.
 */
static int intel_i915_get_gtt_size(void)
{
        int size;

        if (IS_G33) {
                u16 gmch_ctrl;

                /* G33's GTT size defined in gmch_ctrl */
                pci_read_config_word(intel_private.bridge_dev, I830_GMCH_CTRL, &gmch_ctrl);
                switch (gmch_ctrl & I830_GMCH_GMS_MASK) {
                case I830_GMCH_GMS_STOLEN_512:
                        size = 512;
                        break;
                case I830_GMCH_GMS_STOLEN_1024:
                        size = 1024;
                        break;
                case I830_GMCH_GMS_STOLEN_8192:
                        size = 8*1024;
                        break;
                default:
                        dev_info(&intel_private.bridge_dev->dev,
                                 "unknown page table size 0x%x, assuming 512KB\n",
                                 (gmch_ctrl & I830_GMCH_GMS_MASK));
                        size = 512;
                }
        } else {
                /* On previous hardware, the GTT size was just what was
                 * required to map the aperture.
                 */
                size = agp_bridge->driver->fetch_size();
        }

        return KB(size);
}

/* The intel i915 automatically initializes the agp aperture during POST.
 * Use the memory already set aside for it in the GTT.
 */
static int intel_i915_create_gatt_table(struct agp_bridge_data *bridge)
{
        int page_order, ret;
        struct aper_size_info_fixed *size;
        int num_entries;
        u32 temp, temp2;
        int gtt_map_size;

        size = agp_bridge->current_size;
        page_order = size->page_order;
        num_entries = size->num_entries;
        agp_bridge->gatt_table_real = NULL;

        pci_read_config_dword(intel_private.pcidev, I915_MMADDR, &temp);
        pci_read_config_dword(intel_private.pcidev, I915_PTEADDR, &temp2);

        gtt_map_size = intel_i915_get_gtt_size();

        intel_private.gtt = ioremap(temp2, gtt_map_size);
        if (!intel_private.gtt)
                return -ENOMEM;

        intel_private.base.gtt_total_entries = gtt_map_size / 4;

        temp &= 0xfff80000;

        intel_private.registers = ioremap(temp, 128 * 4096);
        if (!intel_private.registers) {
                iounmap(intel_private.gtt);
                return -ENOMEM;
        }

        temp = readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000;
        global_cache_flush();   /* FIXME: ? */

        ret = intel_gtt_init();
        if (ret != 0) {
                iounmap(intel_private.gtt);
                return ret;
        }

        agp_bridge->gatt_table = NULL;

        agp_bridge->gatt_bus_addr = temp;

        return 0;
}

/*
 * The i965 supports 36-bit physical addresses, but to keep
 * the format of the GTT the same, the bits that don't fit
 * in a 32-bit word are shifted down to bits 4..7.
 *
 * Gcc is smart enough to notice that "(addr >> 28) & 0xf0"
 * is always zero on 32-bit architectures, so no need to make
 * this conditional.
 */
static unsigned long intel_i965_mask_memory(struct agp_bridge_data *bridge,
                                            dma_addr_t addr, int type)
{
        /* Shift high bits down */
        addr |= (addr >> 28) & 0xf0;

        /* Type checking must be done elsewhere */
        return addr | bridge->driver->masks[type].mask;
}

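/*
 * Worked example (hypothetical address, for illustration only): for the
 * 36-bit physical address 0x8_1234_5000, (addr >> 28) & 0xf0 == 0x80, so
 * the 32-bit PTE address field becomes 0x1234_5080: physical bits 35:32
 * land in PTE bits 7:4 and the entry still fits in one 32-bit word.
 */
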
static unsigned long intel_gen6_mask_memory(struct agp_bridge_data *bridge,
                                            dma_addr_t addr, int type)
{
        /* gen6 has bits 11..4 for physical addr bits 39..32 */
        addr |= (addr >> 28) & 0xff0;

        /* Type checking must be done elsewhere */
        return addr | bridge->driver->masks[type].mask;
}

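/*
 * Worked example (hypothetical address, for illustration only): for the
 * 40-bit physical address 0xA8_1234_5000, (addr >> 28) & 0xff0 == 0xa80,
 * so the PTE address field becomes 0x1234_5A80: physical bits 39:32 land
 * in PTE bits 11:4.
 */
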
static void intel_i965_get_gtt_range(int *gtt_offset, int *gtt_size)
{
        u16 snb_gmch_ctl;

        switch (intel_private.bridge_dev->device) {
        case PCI_DEVICE_ID_INTEL_GM45_HB:
        case PCI_DEVICE_ID_INTEL_EAGLELAKE_HB:
        case PCI_DEVICE_ID_INTEL_Q45_HB:
        case PCI_DEVICE_ID_INTEL_G45_HB:
        case PCI_DEVICE_ID_INTEL_G41_HB:
        case PCI_DEVICE_ID_INTEL_B43_HB:
        case PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB:
        case PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB:
        case PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB:
        case PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB:
                *gtt_offset = *gtt_size = MB(2);
                break;
        case PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB:
        case PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB:
        case PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_HB:
                *gtt_offset = MB(2);

                pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl);
                switch (snb_gmch_ctl & SNB_GTT_SIZE_MASK) {
                default:
                case SNB_GTT_SIZE_0M:
                        printk(KERN_ERR "Bad GTT size mask: 0x%04x.\n", snb_gmch_ctl);
                        *gtt_size = MB(0);
                        break;
                case SNB_GTT_SIZE_1M:
                        *gtt_size = MB(1);
                        break;
                case SNB_GTT_SIZE_2M:
                        *gtt_size = MB(2);
                        break;
                }
                break;
        default:
                *gtt_offset = *gtt_size = KB(512);
        }
}

/* The intel i965 automatically initializes the agp aperture during POST.
 * Use the memory already set aside for it in the GTT.
 */
static int intel_i965_create_gatt_table(struct agp_bridge_data *bridge)
{
        int page_order, ret;
        struct aper_size_info_fixed *size;
        int num_entries;
        u32 temp;
        int gtt_offset, gtt_size;

        size = agp_bridge->current_size;
        page_order = size->page_order;
        num_entries = size->num_entries;
        agp_bridge->gatt_table_real = NULL;

        pci_read_config_dword(intel_private.pcidev, I915_MMADDR, &temp);

        temp &= 0xfff00000;

        intel_i965_get_gtt_range(&gtt_offset, &gtt_size);

        intel_private.gtt = ioremap((temp + gtt_offset), gtt_size);
        if (!intel_private.gtt)
                return -ENOMEM;

        intel_private.base.gtt_total_entries = gtt_size / 4;

        intel_private.registers = ioremap(temp, 128 * 4096);
        if (!intel_private.registers) {
                iounmap(intel_private.gtt);
                return -ENOMEM;
        }

        temp = readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000;
        global_cache_flush();   /* FIXME: ? */

        ret = intel_gtt_init();
        if (ret != 0) {
                iounmap(intel_private.gtt);
                return ret;
        }

        agp_bridge->gatt_table = NULL;

        agp_bridge->gatt_bus_addr = temp;

        return 0;
}

static const struct agp_bridge_driver intel_810_driver = {
        .owner                  = THIS_MODULE,
        .aperture_sizes         = intel_i810_sizes,
        .size_type              = FIXED_APER_SIZE,
        .num_aperture_sizes     = 2,
        .needs_scratch_page     = true,
        .configure              = intel_i810_configure,
        .fetch_size             = intel_i810_fetch_size,
        .cleanup                = intel_i810_cleanup,
        .mask_memory            = intel_i810_mask_memory,
        .masks                  = intel_i810_masks,
        .agp_enable             = intel_i810_agp_enable,
        .cache_flush            = global_cache_flush,
        .create_gatt_table      = agp_generic_create_gatt_table,
        .free_gatt_table        = agp_generic_free_gatt_table,
        .insert_memory          = intel_i810_insert_entries,
        .remove_memory          = intel_i810_remove_entries,
        .alloc_by_type          = intel_i810_alloc_by_type,
        .free_by_type           = intel_i810_free_by_type,
        .agp_alloc_page         = agp_generic_alloc_page,
        .agp_alloc_pages        = agp_generic_alloc_pages,
        .agp_destroy_page       = agp_generic_destroy_page,
        .agp_destroy_pages      = agp_generic_destroy_pages,
        .agp_type_to_mask_type  = agp_generic_type_to_mask_type,
};

static const struct agp_bridge_driver intel_830_driver = {
        .owner                  = THIS_MODULE,
        .aperture_sizes         = intel_i830_sizes,
        .size_type              = FIXED_APER_SIZE,
        .num_aperture_sizes     = 4,
        .needs_scratch_page     = true,
        .configure              = intel_i830_configure,
        .fetch_size             = intel_fake_agp_fetch_size,
        .cleanup                = intel_i830_cleanup,
        .mask_memory            = intel_i810_mask_memory,
        .masks                  = intel_i810_masks,
        .agp_enable             = intel_i810_agp_enable,
        .cache_flush            = global_cache_flush,
        .create_gatt_table      = intel_i830_create_gatt_table,
        .free_gatt_table        = intel_i830_free_gatt_table,
        .insert_memory          = intel_i830_insert_entries,
        .remove_memory          = intel_i830_remove_entries,
        .alloc_by_type          = intel_i830_alloc_by_type,
        .free_by_type           = intel_i810_free_by_type,
        .agp_alloc_page         = agp_generic_alloc_page,
        .agp_alloc_pages        = agp_generic_alloc_pages,
        .agp_destroy_page       = agp_generic_destroy_page,
        .agp_destroy_pages      = agp_generic_destroy_pages,
        .agp_type_to_mask_type  = intel_i830_type_to_mask_type,
        .chipset_flush          = intel_i830_chipset_flush,
};

static const struct agp_bridge_driver intel_915_driver = {
        .owner                  = THIS_MODULE,
        .aperture_sizes         = intel_i830_sizes,
        .size_type              = FIXED_APER_SIZE,
        .num_aperture_sizes     = 4,
        .needs_scratch_page     = true,
        .configure              = intel_i9xx_configure,
        .fetch_size             = intel_fake_agp_fetch_size,
        .cleanup                = intel_i915_cleanup,
        .mask_memory            = intel_i810_mask_memory,
        .masks                  = intel_i810_masks,
        .agp_enable             = intel_i810_agp_enable,
        .cache_flush            = global_cache_flush,
        .create_gatt_table      = intel_i915_create_gatt_table,
        .free_gatt_table        = intel_i830_free_gatt_table,
        .insert_memory          = intel_i915_insert_entries,
        .remove_memory          = intel_i915_remove_entries,
        .alloc_by_type          = intel_i830_alloc_by_type,
        .free_by_type           = intel_i810_free_by_type,
        .agp_alloc_page         = agp_generic_alloc_page,
        .agp_alloc_pages        = agp_generic_alloc_pages,
        .agp_destroy_page       = agp_generic_destroy_page,
        .agp_destroy_pages      = agp_generic_destroy_pages,
        .agp_type_to_mask_type  = intel_i830_type_to_mask_type,
        .chipset_flush          = intel_i915_chipset_flush,
#ifdef USE_PCI_DMA_API
        .agp_map_page           = intel_agp_map_page,
        .agp_unmap_page         = intel_agp_unmap_page,
        .agp_map_memory         = intel_agp_map_memory,
        .agp_unmap_memory       = intel_agp_unmap_memory,
#endif
};

static const struct agp_bridge_driver intel_i965_driver = {
        .owner                  = THIS_MODULE,
        .aperture_sizes         = intel_i830_sizes,
        .size_type              = FIXED_APER_SIZE,
        .num_aperture_sizes     = 4,
        .needs_scratch_page     = true,
        .configure              = intel_i9xx_configure,
        .fetch_size             = intel_fake_agp_fetch_size,
        .cleanup                = intel_i915_cleanup,
        .mask_memory            = intel_i965_mask_memory,
        .masks                  = intel_i810_masks,
        .agp_enable             = intel_i810_agp_enable,
        .cache_flush            = global_cache_flush,
        .create_gatt_table      = intel_i965_create_gatt_table,
        .free_gatt_table        = intel_i830_free_gatt_table,
        .insert_memory          = intel_i915_insert_entries,
        .remove_memory          = intel_i915_remove_entries,
        .alloc_by_type          = intel_i830_alloc_by_type,
        .free_by_type           = intel_i810_free_by_type,
        .agp_alloc_page         = agp_generic_alloc_page,
        .agp_alloc_pages        = agp_generic_alloc_pages,
        .agp_destroy_page       = agp_generic_destroy_page,
        .agp_destroy_pages      = agp_generic_destroy_pages,
        .agp_type_to_mask_type  = intel_i830_type_to_mask_type,
        .chipset_flush          = intel_i915_chipset_flush,
#ifdef USE_PCI_DMA_API
        .agp_map_page           = intel_agp_map_page,
        .agp_unmap_page         = intel_agp_unmap_page,
        .agp_map_memory         = intel_agp_map_memory,
        .agp_unmap_memory       = intel_agp_unmap_memory,
#endif
};

static const struct agp_bridge_driver intel_gen6_driver = {
        .owner                  = THIS_MODULE,
        .aperture_sizes         = intel_i830_sizes,
        .size_type              = FIXED_APER_SIZE,
        .num_aperture_sizes     = 4,
        .needs_scratch_page     = true,
        .configure              = intel_i9xx_configure,
        .fetch_size             = intel_fake_agp_fetch_size,
        .cleanup                = intel_i915_cleanup,
        .mask_memory            = intel_gen6_mask_memory,
        .masks                  = intel_gen6_masks,
        .agp_enable             = intel_i810_agp_enable,
        .cache_flush            = global_cache_flush,
        .create_gatt_table      = intel_i965_create_gatt_table,
        .free_gatt_table        = intel_i830_free_gatt_table,
        .insert_memory          = intel_i915_insert_entries,
        .remove_memory          = intel_i915_remove_entries,
        .alloc_by_type          = intel_i830_alloc_by_type,
        .free_by_type           = intel_i810_free_by_type,
        .agp_alloc_page         = agp_generic_alloc_page,
        .agp_alloc_pages        = agp_generic_alloc_pages,
        .agp_destroy_page       = agp_generic_destroy_page,
        .agp_destroy_pages      = agp_generic_destroy_pages,
        .agp_type_to_mask_type  = intel_gen6_type_to_mask_type,
        .chipset_flush          = intel_i915_chipset_flush,
#ifdef USE_PCI_DMA_API
        .agp_map_page           = intel_agp_map_page,
        .agp_unmap_page         = intel_agp_unmap_page,
        .agp_map_memory         = intel_agp_map_memory,
        .agp_unmap_memory       = intel_agp_unmap_memory,
#endif
};

static const struct agp_bridge_driver intel_g33_driver = {
        .owner                  = THIS_MODULE,
        .aperture_sizes         = intel_i830_sizes,
        .size_type              = FIXED_APER_SIZE,
        .num_aperture_sizes     = 4,
        .needs_scratch_page     = true,
        .configure              = intel_i9xx_configure,
        .fetch_size             = intel_fake_agp_fetch_size,
        .cleanup                = intel_i915_cleanup,
        .mask_memory            = intel_i965_mask_memory,
        .masks                  = intel_i810_masks,
        .agp_enable             = intel_i810_agp_enable,
        .cache_flush            = global_cache_flush,
        .create_gatt_table      = intel_i915_create_gatt_table,
        .free_gatt_table        = intel_i830_free_gatt_table,
        .insert_memory          = intel_i915_insert_entries,
        .remove_memory          = intel_i915_remove_entries,
        .alloc_by_type          = intel_i830_alloc_by_type,
        .free_by_type           = intel_i810_free_by_type,
        .agp_alloc_page         = agp_generic_alloc_page,
        .agp_alloc_pages        = agp_generic_alloc_pages,
        .agp_destroy_page       = agp_generic_destroy_page,
        .agp_destroy_pages      = agp_generic_destroy_pages,
        .agp_type_to_mask_type  = intel_i830_type_to_mask_type,
        .chipset_flush          = intel_i915_chipset_flush,
#ifdef USE_PCI_DMA_API
        .agp_map_page           = intel_agp_map_page,
        .agp_unmap_page         = intel_agp_unmap_page,
        .agp_map_memory         = intel_agp_map_memory,
        .agp_unmap_memory       = intel_agp_unmap_memory,
#endif
};

/* Table to describe Intel GMCH and AGP/PCIE GART drivers.  At least one of
 * driver and gmch_driver must be non-null, and find_gmch will determine
 * which one should be used if a gmch_chip_id is present.
 */
static const struct intel_gtt_driver_description {
        unsigned int gmch_chip_id;
        char *name;
        const struct agp_bridge_driver *gmch_driver;
} intel_gtt_chipsets[] = {
        { PCI_DEVICE_ID_INTEL_82810_IG1, "i810", &intel_810_driver },
        { PCI_DEVICE_ID_INTEL_82810_IG3, "i810", &intel_810_driver },
        { PCI_DEVICE_ID_INTEL_82810E_IG, "i810", &intel_810_driver },
        { PCI_DEVICE_ID_INTEL_82815_CGC, "i815", &intel_810_driver },
        { PCI_DEVICE_ID_INTEL_82830_CGC, "830M", &intel_830_driver },
        { PCI_DEVICE_ID_INTEL_82845G_IG, "830M", &intel_830_driver },
        { PCI_DEVICE_ID_INTEL_82854_IG, "854", &intel_830_driver },
        { PCI_DEVICE_ID_INTEL_82855GM_IG, "855GM", &intel_830_driver },
        { PCI_DEVICE_ID_INTEL_82865_IG, "865", &intel_830_driver },
        { PCI_DEVICE_ID_INTEL_E7221_IG, "E7221 (i915)", &intel_915_driver },
        { PCI_DEVICE_ID_INTEL_82915G_IG, "915G", &intel_915_driver },
        { PCI_DEVICE_ID_INTEL_82915GM_IG, "915GM", &intel_915_driver },
        { PCI_DEVICE_ID_INTEL_82945G_IG, "945G", &intel_915_driver },
        { PCI_DEVICE_ID_INTEL_82945GM_IG, "945GM", &intel_915_driver },
        { PCI_DEVICE_ID_INTEL_82945GME_IG, "945GME", &intel_915_driver },
        { PCI_DEVICE_ID_INTEL_82946GZ_IG, "946GZ", &intel_i965_driver },
        { PCI_DEVICE_ID_INTEL_82G35_IG, "G35", &intel_i965_driver },
        { PCI_DEVICE_ID_INTEL_82965Q_IG, "965Q", &intel_i965_driver },
        { PCI_DEVICE_ID_INTEL_82965G_IG, "965G", &intel_i965_driver },
        { PCI_DEVICE_ID_INTEL_82965GM_IG, "965GM", &intel_i965_driver },
        { PCI_DEVICE_ID_INTEL_82965GME_IG, "965GME/GLE", &intel_i965_driver },
        { PCI_DEVICE_ID_INTEL_G33_IG, "G33", &intel_g33_driver },
        { PCI_DEVICE_ID_INTEL_Q35_IG, "Q35", &intel_g33_driver },
        { PCI_DEVICE_ID_INTEL_Q33_IG, "Q33", &intel_g33_driver },
        { PCI_DEVICE_ID_INTEL_PINEVIEW_M_IG, "GMA3150", &intel_g33_driver },
        { PCI_DEVICE_ID_INTEL_PINEVIEW_IG, "GMA3150", &intel_g33_driver },
        { PCI_DEVICE_ID_INTEL_GM45_IG, "GM45", &intel_i965_driver },
        { PCI_DEVICE_ID_INTEL_EAGLELAKE_IG, "Eaglelake", &intel_i965_driver },
        { PCI_DEVICE_ID_INTEL_Q45_IG, "Q45/Q43", &intel_i965_driver },
        { PCI_DEVICE_ID_INTEL_G45_IG, "G45/G43", &intel_i965_driver },
        { PCI_DEVICE_ID_INTEL_B43_IG, "B43", &intel_i965_driver },
        { PCI_DEVICE_ID_INTEL_G41_IG, "G41", &intel_i965_driver },
        { PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG,
            "HD Graphics", &intel_i965_driver },
        { PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG,
            "HD Graphics", &intel_i965_driver },
        { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT1_IG,
            "Sandybridge", &intel_gen6_driver },
        { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_IG,
            "Sandybridge", &intel_gen6_driver },
        { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_PLUS_IG,
            "Sandybridge", &intel_gen6_driver },
        { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT1_IG,
            "Sandybridge", &intel_gen6_driver },
        { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_IG,
            "Sandybridge", &intel_gen6_driver },
        { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_PLUS_IG,
            "Sandybridge", &intel_gen6_driver },
        { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_IG,
            "Sandybridge", &intel_gen6_driver },
        { 0, NULL, NULL }
};

static int find_gmch(u16 device)
{
        struct pci_dev *gmch_device;

        gmch_device = pci_get_device(PCI_VENDOR_ID_INTEL, device, NULL);
        if (gmch_device && PCI_FUNC(gmch_device->devfn) != 0) {
                gmch_device = pci_get_device(PCI_VENDOR_ID_INTEL,
                                             device, gmch_device);
        }

        if (!gmch_device)
                return 0;

        intel_private.pcidev = gmch_device;
        return 1;
}

int intel_gmch_probe(struct pci_dev *pdev,
                     struct agp_bridge_data *bridge)
{
        int i, mask;

        bridge->driver = NULL;

        for (i = 0; intel_gtt_chipsets[i].name != NULL; i++) {
                if (find_gmch(intel_gtt_chipsets[i].gmch_chip_id)) {
                        bridge->driver =
                                intel_gtt_chipsets[i].gmch_driver;
                        break;
                }
        }

        if (!bridge->driver)
                return 0;

        bridge->dev_private_data = &intel_private;
        bridge->dev = pdev;

        intel_private.bridge_dev = pci_dev_get(pdev);

        dev_info(&pdev->dev, "Intel %s Chipset\n", intel_gtt_chipsets[i].name);

        if (bridge->driver->mask_memory == intel_gen6_mask_memory)
                mask = 40;
        else if (bridge->driver->mask_memory == intel_i965_mask_memory)
                mask = 36;
        else
                mask = 32;

        if (pci_set_dma_mask(intel_private.pcidev, DMA_BIT_MASK(mask)))
                dev_err(&intel_private.pcidev->dev,
                        "set gfx device dma mask %d-bit failed!\n", mask);
        else
                pci_set_consistent_dma_mask(intel_private.pcidev,
                                            DMA_BIT_MASK(mask));

        if (bridge->driver == &intel_810_driver)
                return 1;

        intel_private.base.gtt_mappable_entries = intel_gtt_mappable_entries();

        return 1;
}
EXPORT_SYMBOL(intel_gmch_probe);

void intel_gmch_remove(struct pci_dev *pdev)
{
        if (intel_private.pcidev)
                pci_dev_put(intel_private.pcidev);
        if (intel_private.bridge_dev)
                pci_dev_put(intel_private.bridge_dev);
}
EXPORT_SYMBOL(intel_gmch_remove);

MODULE_AUTHOR("Dave Jones <davej@redhat.com>");
MODULE_LICENSE("GPL and additional rights");