/*
 * Copyright (c) 2007, Intel Corporation.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Authors: Thomas Hellstrom <thomas-at-tungstengraphics.com>
 *          Alan Cox <alan@linux.intel.com>
 */

#include <drm/drmP.h>
#include "psb_drv.h"
/*
 * GTT resource allocator - manage page mappings in GTT space
 */
/**
 * psb_gtt_mask_pte - generate GART pte entry
 * @pfn: page number to encode
 * @type: type of memory in the GART
 *
 * Set the GART entry for the appropriate memory type.
 */
static inline uint32_t psb_gtt_mask_pte(uint32_t pfn, int type)
{
        uint32_t mask = PSB_PTE_VALID;

        if (type & PSB_MMU_CACHED_MEMORY)
                mask |= PSB_PTE_CACHED;
        if (type & PSB_MMU_RO_MEMORY)
                mask |= PSB_PTE_RO;
        if (type & PSB_MMU_WO_MEMORY)
                mask |= PSB_PTE_WO;

        return (pfn << PAGE_SHIFT) | mask;
}
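
/*
 * Worked example (illustrative only, values assumed, not driver code):
 * for pfn 0x1234 and type PSB_MMU_CACHED_MEMORY the returned entry is
 *
 *      (0x1234 << PAGE_SHIFT) | PSB_PTE_VALID | PSB_PTE_CACHED
 *
 * i.e. the physical page address in the upper bits with the valid and
 * cacheability flags folded into the low bits of the PTE.
 */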
/**
 * psb_gtt_entry - find the GART entries for a gtt_range
 * @dev: our DRM device
 * @r: our GTT range
 *
 * Given a gtt_range object return the GART offset of the page table
 * entries for this gtt_range.
 */
u32 *psb_gtt_entry(struct drm_device *dev, struct gtt_range *r)
{
        struct drm_psb_private *dev_priv = dev->dev_private;
        unsigned long offset;

        offset = r->resource.start - dev_priv->gtt_mem->start;

        return dev_priv->gtt_map + (offset >> PAGE_SHIFT);
}
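
/*
 * Example (illustrative, values assumed): a gtt_range whose resource
 * begins 16 pages into the aperture resolves to the 16th GART slot:
 *
 *      r->resource.start == dev_priv->gtt_mem->start + 16 * PAGE_SIZE
 *      psb_gtt_entry(dev, r) == dev_priv->gtt_map + 16
 */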
/**
 * psb_gtt_insert - put an object into the GART
 * @dev: our DRM device
 * @r: our GTT range
 *
 * Take our preallocated GTT range and insert the GEM object into
 * the GART.
 */
static int psb_gtt_insert(struct drm_device *dev, struct gtt_range *r)
{
        u32 *gtt_slot, pte;
        struct page **pages;
        int numpages = (r->resource.end + 1 - r->resource.start) >> PAGE_SHIFT;
        int i;

        if (r->pages == NULL) {
                WARN_ON(1);
                return -EINVAL;
        }

        WARN_ON(r->stolen);     /* refcount these maybe ? */

        gtt_slot = psb_gtt_entry(dev, r);
        pages = r->pages;

        /* Make sure we have no alias present */
        wbinvd();

        /* Write our page entries into the GART itself */
        for (i = 0; i < numpages; i++) {
                pte = psb_gtt_mask_pte(page_to_pfn(*pages++), 0/*type*/);
                iowrite32(pte, gtt_slot++);
        }
        /* Make sure all the entries are set before we return */
        ioread32(gtt_slot - 1);

        return 0;
}
/**
 * psb_gtt_remove - remove an object from the GART
 * @dev: our DRM device
 * @r: our GTT range
 *
 * Remove a preallocated GTT range from the GART. Overwrite all the
 * page table entries with the dummy page.
 */
static void psb_gtt_remove(struct drm_device *dev, struct gtt_range *r)
{
        struct drm_psb_private *dev_priv = dev->dev_private;
        u32 *gtt_slot, pte;
        int numpages = (r->resource.end + 1 - r->resource.start) >> PAGE_SHIFT;
        int i;

        WARN_ON(r->stolen);

        gtt_slot = psb_gtt_entry(dev, r);
        pte = psb_gtt_mask_pte(page_to_pfn(dev_priv->scratch_page), 0);

        for (i = 0; i < numpages; i++)
                iowrite32(pte, gtt_slot++);
        ioread32(gtt_slot - 1);
}
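
/*
 * Worked example (illustrative): after psb_gtt_insert() a three page
 * range occupies slots 0..2 of its GART window, each holding the PTE
 * of the corresponding backing page. psb_gtt_remove() rewrites all
 * three slots with the scratch page PTE, so a stale GPU access hits a
 * harmless dummy page rather than memory that may have been reused.
 */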
/**
 * psb_gtt_attach_pages - attach and pin GEM pages
 * @gt: the gtt range
 *
 * Pin and build an in kernel list of the pages that back our GEM object.
 * While we hold this the pages cannot be swapped out.
 *
 * FIXME: Do we need to cache flush when we update the GTT
 */
static int psb_gtt_attach_pages(struct gtt_range *gt)
{
        struct inode *inode;
        struct address_space *mapping;
        int i;
        struct page *p;
        int pages = (gt->resource.end + 1 - gt->resource.start) >> PAGE_SHIFT;

        WARN_ON(gt->pages);

        /* This is the shared memory object that backs the GEM resource */
        inode = gt->gem.filp->f_path.dentry->d_inode;
        mapping = inode->i_mapping;

        gt->pages = kmalloc(pages * sizeof(struct page *), GFP_KERNEL);
        if (gt->pages == NULL)
                return -ENOMEM;

        for (i = 0; i < pages; i++) {
                /* FIXME: review flags later */
                p = read_cache_page_gfp(mapping, i,
                                        __GFP_COLD | GFP_KERNEL);
                if (IS_ERR(p))
                        goto err;
                gt->pages[i] = p;
        }
        return 0;

err:
        /* Drop the page references we took so far and free the list */
        while (i--)
                page_cache_release(gt->pages[i]);
        kfree(gt->pages);
        gt->pages = NULL;
        return PTR_ERR(p);
}
/**
 * psb_gtt_detach_pages - unpin and release GEM pages
 * @gt: the gtt range
 *
 * Undo the effect of psb_gtt_attach_pages. At this point the pages
 * must have been removed from the GART as they could now be paged out
 * and change bus address.
 *
 * FIXME: Do we need to cache flush when we update the GTT
 */
static void psb_gtt_detach_pages(struct gtt_range *gt)
{
        int i;
        int pages = (gt->resource.end + 1 - gt->resource.start) >> PAGE_SHIFT;

        for (i = 0; i < pages; i++) {
                /* FIXME: do we need to force dirty */
                set_page_dirty(gt->pages[i]);
                /* Undo the reference we took when populating the table */
                page_cache_release(gt->pages[i]);
        }
        kfree(gt->pages);
        gt->pages = NULL;
}
/**
 * psb_gtt_pin - pin pages into the GTT
 * @gt: range to pin
 *
 * Pin a set of pages into the GTT. The pins are refcounted so that
 * multiple pins need multiple unpins to undo.
 *
 * Non GEM backed objects treat this as a no-op as they are always GTT
 * backed.
 */
int psb_gtt_pin(struct gtt_range *gt)
{
        int ret = 0;
        struct drm_device *dev = gt->gem.dev;
        struct drm_psb_private *dev_priv = dev->dev_private;

        mutex_lock(&dev_priv->gtt_mutex);

        if (gt->in_gart == 0 && gt->stolen == 0) {
                ret = psb_gtt_attach_pages(gt);
                if (ret < 0)
                        goto out;
                ret = psb_gtt_insert(dev, gt);
                if (ret < 0) {
                        psb_gtt_detach_pages(gt);
                        goto out;
                }
        }
        gt->in_gart++;
out:
        mutex_unlock(&dev_priv->gtt_mutex);
        return ret;
}
/**
 * psb_gtt_unpin - Drop a GTT pin requirement
 * @gt: range to unpin
 *
 * Undoes the effect of psb_gtt_pin. On the last drop the GEM object
 * will be removed from the GTT, which will also drop the page references
 * and allow the VM to clean up or page the memory out.
 *
 * Non GEM backed objects treat this as a no-op as they are always GTT
 * backed.
 */
void psb_gtt_unpin(struct gtt_range *gt)
{
        struct drm_device *dev = gt->gem.dev;
        struct drm_psb_private *dev_priv = dev->dev_private;

        mutex_lock(&dev_priv->gtt_mutex);

        WARN_ON(!gt->in_gart);

        gt->in_gart--;
        if (gt->in_gart == 0 && gt->stolen == 0) {
                psb_gtt_remove(dev, gt);
                psb_gtt_detach_pages(gt);
        }
        mutex_unlock(&dev_priv->gtt_mutex);
}
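
/*
 * Usage sketch (illustrative only, not driver code): a caller that
 * needs an object resident in the GART for the duration of an
 * operation brackets it with a pin/unpin pair. Pins nest, so every
 * successful psb_gtt_pin() must be matched by one psb_gtt_unpin().
 *
 *      ret = psb_gtt_pin(gt);
 *      if (ret == 0) {
 *              ... program the hardware with gt->offset ...
 *              psb_gtt_unpin(gt);
 *      }
 */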
/*
 * GTT resource allocator - allocate and manage GTT address space
 */
/**
 * psb_gtt_alloc_range - allocate GTT address space
 * @dev: Our DRM device
 * @len: length (bytes) of address space required
 * @name: resource name
 * @backed: resource should be backed by stolen pages
 *
 * Ask the kernel core to find us a suitable range of addresses
 * to use for a GTT mapping.
 *
 * Returns a gtt_range structure describing the object, or NULL on
 * error. On successful return the resource is both allocated and marked
 * as in use.
 */
struct gtt_range *psb_gtt_alloc_range(struct drm_device *dev, int len,
                                      const char *name, int backed)
{
        struct drm_psb_private *dev_priv = dev->dev_private;
        struct gtt_range *gt;
        struct resource *r = dev_priv->gtt_mem;
        int ret;
        unsigned long start, end;

        if (backed) {
                /* The start of the GTT is the stolen pages */
                start = r->start;
                end = r->start + dev_priv->pg->stolen_size - 1;
        } else {
                /* The rest we will use for GEM backed objects */
                start = r->start + dev_priv->pg->stolen_size;
                end = r->end;
        }

        gt = kzalloc(sizeof(struct gtt_range), GFP_KERNEL);
        if (gt == NULL)
                return NULL;
        gt->resource.name = name;
        gt->stolen = backed;
        gt->in_gart = backed;
        /* Ensure this is set for non GEM objects */
        gt->gem.dev = dev;
        kref_init(&gt->kref);

        ret = allocate_resource(dev_priv->gtt_mem, &gt->resource,
                                len, start, end, PAGE_SIZE, NULL, NULL);
        if (ret == 0) {
                gt->offset = gt->resource.start - r->start;
                return gt;
        }
        kfree(gt);
        return NULL;
}
/**
 * psb_gtt_destroy - final free up of a gtt
 * @kref: the kref of the gtt
 *
 * Called from the kernel kref put when the final reference to our
 * GTT object is dropped. At that point we can free up the resources.
 *
 * For now we handle mmap clean up here to work around limits in GEM.
 */
static void psb_gtt_destroy(struct kref *kref)
{
        struct gtt_range *gt = container_of(kref, struct gtt_range, kref);

        /* Undo the mmap pin if we are destroying the object */
        if (gt->mmapping) {
                psb_gtt_unpin(gt);
                gt->mmapping = 0;
        }
        WARN_ON(gt->in_gart && !gt->stolen);
        release_resource(&gt->resource);
        kfree(gt);
}
/**
 * psb_gtt_kref_put - drop reference to a GTT object
 * @gt: the GTT object being dropped
 *
 * Drop a reference to a psb gtt object.
 */
void psb_gtt_kref_put(struct gtt_range *gt)
{
        kref_put(&gt->kref, psb_gtt_destroy);
}
/**
 * psb_gtt_free_range - release GTT address space
 * @dev: our DRM device
 * @gt: a mapping created with psb_gtt_alloc_range
 *
 * Release a resource that was allocated with psb_gtt_alloc_range.
 */
void psb_gtt_free_range(struct drm_device *dev, struct gtt_range *gt)
{
        psb_gtt_kref_put(gt);
}
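
/*
 * Lifecycle sketch (illustrative only; the functions are real, the
 * flow is an example): allocate GART space for a GEM backed object,
 * pin it while the hardware uses it, then release it. The final
 * psb_gtt_free_range() drops the kref taken in psb_gtt_alloc_range()
 * and psb_gtt_destroy() then releases the resource.
 *
 *      struct gtt_range *gt;
 *
 *      gt = psb_gtt_alloc_range(dev, 16 * PAGE_SIZE, "demo", 0);
 *      if (gt == NULL)
 *              return -ENOMEM;
 *      if (psb_gtt_pin(gt) == 0) {
 *              ... let the hardware use gt->offset ...
 *              psb_gtt_unpin(gt);
 *      }
 *      psb_gtt_free_range(dev, gt);
 */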
struct psb_gtt *psb_gtt_alloc(struct drm_device *dev)
{
        struct psb_gtt *tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);

        if (!tmp)
                return NULL;

        init_rwsem(&tmp->sem);
        tmp->dev = dev;

        return tmp;
}
void psb_gtt_takedown(struct drm_device *dev)
{
        struct drm_psb_private *dev_priv = dev->dev_private;

        /* FIXME: iounmap dev_priv->vram_addr etc */
        if (dev_priv->gtt_map) {
                iounmap(dev_priv->gtt_map);
                dev_priv->gtt_map = NULL;
        }
        if (dev_priv->gtt_initialized) {
                pci_write_config_word(dev->pdev, PSB_GMCH_CTRL,
                                      dev_priv->gmch_ctrl);
                PSB_WVDC32(dev_priv->pge_ctl, PSB_PGETBL_CTL);
                (void) PSB_RVDC32(PSB_PGETBL_CTL);
        }
}
int psb_gtt_init(struct drm_device *dev, int resume)
{
        struct drm_psb_private *dev_priv = dev->dev_private;
        struct psb_gtt *pg;
        unsigned gtt_pages;
        unsigned long stolen_size, vram_stolen_size;
        unsigned i, num_pages;
        unsigned pfn_base;
        uint32_t vram_pages;
        uint32_t tt_pages;
        uint32_t *ttm_gtt_map;
        uint32_t dvmt_mode = 0;
        uint32_t pte;
        int ret = 0;

        mutex_init(&dev_priv->gtt_mutex);

        dev_priv->pg = pg = psb_gtt_alloc(dev);
        if (pg == NULL)
                return -ENOMEM;

        pci_read_config_word(dev->pdev, PSB_GMCH_CTRL, &dev_priv->gmch_ctrl);
        pci_write_config_word(dev->pdev, PSB_GMCH_CTRL,
                              dev_priv->gmch_ctrl | _PSB_GMCH_ENABLED);

        dev_priv->pge_ctl = PSB_RVDC32(PSB_PGETBL_CTL);
        PSB_WVDC32(dev_priv->pge_ctl | _PSB_PGETBL_ENABLED, PSB_PGETBL_CTL);
        (void) PSB_RVDC32(PSB_PGETBL_CTL);

        /* The root resource we allocate address space from */
        dev_priv->gtt_mem = &dev->pdev->resource[PSB_GATT_RESOURCE];

        dev_priv->gtt_initialized = 1;

        pg->gtt_phys_start = dev_priv->pge_ctl & PAGE_MASK;

        pg->gatt_start = pci_resource_start(dev->pdev, PSB_GATT_RESOURCE);
        /* FIXME: the video MMU has a HW bug accessing 0xD0000000, so
         * make the GATT start at 0xE0000000 instead */
        pg->mmu_gatt_start = 0xE0000000;
        pg->gtt_start = pci_resource_start(dev->pdev, PSB_GTT_RESOURCE);
        gtt_pages = pci_resource_len(dev->pdev, PSB_GTT_RESOURCE)
                                                        >> PAGE_SHIFT;
        pg->gatt_pages = pci_resource_len(dev->pdev, PSB_GATT_RESOURCE)
                                                        >> PAGE_SHIFT;

        pci_read_config_dword(dev->pdev, PSB_BSM, &dev_priv->stolen_base);
        vram_stolen_size = pg->gtt_phys_start - dev_priv->stolen_base
                                                        - PAGE_SIZE;

        stolen_size = vram_stolen_size;

        printk(KERN_INFO "GMMADR(region 0) start: 0x%08x (%dM).\n",
                pg->gatt_start, pg->gatt_pages / 256);
        printk(KERN_INFO "GTTADR(region 3) start: 0x%08x (can map %dM RAM), and actual RAM base 0x%08x.\n",
                pg->gtt_start, gtt_pages * 4, pg->gtt_phys_start);
        printk(KERN_INFO "Stolen memory information\n");
        printk(KERN_INFO "       base in RAM: 0x%x\n", dev_priv->stolen_base);
        printk(KERN_INFO "       size: %luK, calculated by (GTT RAM base) - (Stolen base), seems wrong\n",
                vram_stolen_size / 1024);
        dvmt_mode = (dev_priv->gmch_ctrl >> 4) & 0x7;
        printk(KERN_INFO "      the correct size should be: %dM(dvmt mode=%d)\n",
                (dvmt_mode == 1) ? 1 : (2 << (dvmt_mode - 1)), dvmt_mode);

        if (resume && (gtt_pages != pg->gtt_pages) &&
            (stolen_size != pg->stolen_size)) {
                DRM_ERROR("GTT resume error.\n");
                ret = -EINVAL;
                goto out_err;
        }

        pg->gtt_pages = gtt_pages;
        pg->stolen_size = stolen_size;
        dev_priv->vram_stolen_size = vram_stolen_size;

        dev_priv->gtt_map =
            ioremap_nocache(pg->gtt_phys_start, gtt_pages << PAGE_SHIFT);
        if (!dev_priv->gtt_map) {
                DRM_ERROR("Failure to map gtt.\n");
                ret = -ENOMEM;
                goto out_err;
        }

        dev_priv->vram_addr = ioremap_wc(dev_priv->stolen_base, stolen_size);
        if (!dev_priv->vram_addr) {
                DRM_ERROR("Failure to map stolen base.\n");
                ret = -ENOMEM;
                goto out_err;
        }

        DRM_DEBUG("gma500: vram kernel virtual address %p\n",
                  dev_priv->vram_addr);

        tt_pages = (pg->gatt_pages < PSB_TT_PRIV0_PLIMIT) ?
                (pg->gatt_pages) : PSB_TT_PRIV0_PLIMIT;

        ttm_gtt_map = dev_priv->gtt_map + tt_pages / 2;

        /*
         * Insert vram stolen pages.
         */
        pfn_base = dev_priv->stolen_base >> PAGE_SHIFT;
        vram_pages = num_pages = vram_stolen_size >> PAGE_SHIFT;
        printk(KERN_INFO "Set up %d stolen pages starting at 0x%08x, GTT offset %dK\n",
                num_pages, pfn_base, 0);
        for (i = 0; i < num_pages; ++i) {
                pte = psb_gtt_mask_pte(pfn_base + i, 0);
                iowrite32(pte, dev_priv->gtt_map + i);
        }

        /*
         * Init rest of gtt managed by IMG.
         */
        pfn_base = page_to_pfn(dev_priv->scratch_page);
        pte = psb_gtt_mask_pte(pfn_base, 0);
        for (; i < tt_pages / 2 - 1; ++i)
                iowrite32(pte, dev_priv->gtt_map + i);

        /*
         * Init rest of gtt managed by TTM.
         */
        pfn_base = page_to_pfn(dev_priv->scratch_page);
        pte = psb_gtt_mask_pte(pfn_base, 0);
        PSB_DEBUG_INIT("Initializing the rest of a total "
                       "of %d gtt pages.\n", pg->gatt_pages);

        for (; i < pg->gatt_pages - tt_pages / 2; ++i)
                iowrite32(pte, ttm_gtt_map + i);
        (void) ioread32(dev_priv->gtt_map + i - 1);

        return 0;

out_err:
        psb_gtt_takedown(dev);
        return ret;
}
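
/*
 * Sketch of the GART layout established by psb_gtt_init() above
 * (illustrative; the split points depend on the BIOS stolen size and
 * PSB_TT_PRIV0_PLIMIT):
 *
 *      gtt_map[0 .. num_pages-1]         PTEs for the stolen VRAM pages
 *      rest of the IMG managed half      scratch page PTEs
 *      TTM managed half (ttm_gtt_map)    scratch page PTEs
 */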