/*
 * Copyright (c) 2007, Intel Corporation.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Authors: Thomas Hellstrom <thomas-at-tungstengraphics.com>
 */
23 #include "psb_pvr_glue.h"
25 static inline uint32_t psb_gtt_mask_pte(uint32_t pfn
, int type
)
27 uint32_t mask
= PSB_PTE_VALID
;
29 if (type
& PSB_MMU_CACHED_MEMORY
)
30 mask
|= PSB_PTE_CACHED
;
31 if (type
& PSB_MMU_RO_MEMORY
)
33 if (type
& PSB_MMU_WO_MEMORY
)
36 return (pfn
<< PAGE_SHIFT
) | mask
;
39 struct psb_gtt
*psb_gtt_alloc(struct drm_device
*dev
)
41 struct psb_gtt
*tmp
= kzalloc(sizeof(*tmp
), GFP_KERNEL
);
46 init_rwsem(&tmp
->sem
);
52 void psb_gtt_takedown(struct psb_gtt
*pg
, int free
)
54 struct drm_psb_private
*dev_priv
= pg
->dev
->dev_private
;
63 if (pg
->initialized
) {
64 pci_write_config_word(pg
->dev
->pdev
, PSB_GMCH_CTRL
,
66 PSB_WVDC32(pg
->pge_ctl
, PSB_PGETBL_CTL
);
67 (void) PSB_RVDC32(PSB_PGETBL_CTL
);
73 int psb_gtt_init(struct psb_gtt
*pg
, int resume
)
75 struct drm_device
*dev
= pg
->dev
;
76 struct drm_psb_private
*dev_priv
= dev
->dev_private
;
78 unsigned long stolen_size
, vram_stolen_size
, ci_stolen_size
;
79 unsigned long rar_stolen_size
;
80 unsigned i
, num_pages
;
82 uint32_t ci_pages
, vram_pages
;
84 uint32_t *ttm_gtt_map
;
85 uint32_t dvmt_mode
= 0;
90 pci_read_config_word(dev
->pdev
, PSB_GMCH_CTRL
, &pg
->gmch_ctrl
);
91 pci_write_config_word(dev
->pdev
, PSB_GMCH_CTRL
,
92 pg
->gmch_ctrl
| _PSB_GMCH_ENABLED
);
94 pg
->pge_ctl
= PSB_RVDC32(PSB_PGETBL_CTL
);
95 PSB_WVDC32(pg
->pge_ctl
| _PSB_PGETBL_ENABLED
, PSB_PGETBL_CTL
);
96 (void) PSB_RVDC32(PSB_PGETBL_CTL
);
100 pg
->gtt_phys_start
= pg
->pge_ctl
& PAGE_MASK
;
102 pg
->gatt_start
= pci_resource_start(dev
->pdev
, PSB_GATT_RESOURCE
);
103 /* fix me: video mmu has hw bug to access 0x0D0000000,
104 * then make gatt start at 0x0e000,0000 */
105 pg
->mmu_gatt_start
= PSB_MEM_TT_START
;
106 pg
->gtt_start
= pci_resource_start(dev
->pdev
, PSB_GTT_RESOURCE
);
108 pci_resource_len(dev
->pdev
, PSB_GTT_RESOURCE
) >> PAGE_SHIFT
;
109 pg
->gatt_pages
= pci_resource_len(dev
->pdev
, PSB_GATT_RESOURCE
)
112 pci_read_config_dword(dev
->pdev
, PSB_BSM
, &pg
->stolen_base
);
113 vram_stolen_size
= pg
->gtt_phys_start
- pg
->stolen_base
- PAGE_SIZE
;
115 /* CI is not included in the stolen size since the TOPAZ MMU bug */
116 ci_stolen_size
= dev_priv
->ci_region_size
;
117 /* Don't add CI & RAR share buffer space
118 * managed by TTM to stolen_size */
119 stolen_size
= vram_stolen_size
;
121 rar_stolen_size
= dev_priv
->rar_region_size
;
123 printk(KERN_INFO
"GMMADR(region 0) start: 0x%08x (%dM).\n",
124 pg
->gatt_start
, pg
->gatt_pages
/256);
125 printk(KERN_INFO
"GTTADR(region 3) start: 0x%08x (can map %dM RAM), and actual RAM base 0x%08x.\n",
126 pg
->gtt_start
, gtt_pages
* 4, pg
->gtt_phys_start
);
127 printk(KERN_INFO
"Stole memory information\n");
128 printk(KERN_INFO
" base in RAM: 0x%x\n", pg
->stolen_base
);
129 printk(KERN_INFO
" size: %luK, calculated by (GTT RAM base) - (Stolen base), seems wrong\n",
130 vram_stolen_size
/1024);
131 dvmt_mode
= (pg
->gmch_ctrl
>> 4) & 0x7;
132 printk(KERN_INFO
" the correct size should be: %dM(dvmt mode=%d)\n",
133 (dvmt_mode
== 1) ? 1 : (2 << (dvmt_mode
- 1)), dvmt_mode
);
135 if (ci_stolen_size
> 0)
136 printk(KERN_INFO
"CI Stole memory: RAM base = 0x%08x, size = %lu M\n",
137 dev_priv
->ci_region_start
,
138 ci_stolen_size
/ 1024 / 1024);
139 if (rar_stolen_size
> 0)
140 printk(KERN_INFO
"RAR Stole memory: RAM base = 0x%08x, size = %lu M\n",
141 dev_priv
->rar_region_start
,
142 rar_stolen_size
/ 1024 / 1024);
144 if (resume
&& (gtt_pages
!= pg
->gtt_pages
) &&
145 (stolen_size
!= pg
->stolen_size
)) {
146 DRM_ERROR("GTT resume error.\n");
151 pg
->gtt_pages
= gtt_pages
;
152 pg
->stolen_size
= stolen_size
;
153 pg
->vram_stolen_size
= vram_stolen_size
;
154 pg
->ci_stolen_size
= ci_stolen_size
;
155 pg
->rar_stolen_size
= rar_stolen_size
;
157 ioremap_nocache(pg
->gtt_phys_start
, gtt_pages
<< PAGE_SHIFT
);
159 DRM_ERROR("Failure to map gtt.\n");
164 pg
->vram_addr
= ioremap_wc(pg
->stolen_base
, stolen_size
);
165 if (!pg
->vram_addr
) {
166 DRM_ERROR("Failure to map stolen base.\n");
171 DRM_DEBUG("%s: vram kernel virtual address %p\n", pg
->vram_addr
);
173 tt_pages
= (pg
->gatt_pages
< PSB_TT_PRIV0_PLIMIT
) ?
174 (pg
->gatt_pages
) : PSB_TT_PRIV0_PLIMIT
;
176 ttm_gtt_map
= pg
->gtt_map
+ tt_pages
/ 2;
179 * insert vram stolen pages.
182 pfn_base
= pg
->stolen_base
>> PAGE_SHIFT
;
183 vram_pages
= num_pages
= vram_stolen_size
>> PAGE_SHIFT
;
184 printk(KERN_INFO
"Set up %d stolen pages starting at 0x%08x, GTT offset %dK\n",
185 num_pages
, pfn_base
, 0);
186 for (i
= 0; i
< num_pages
; ++i
) {
187 pte
= psb_gtt_mask_pte(pfn_base
+ i
, 0);
188 iowrite32(pte
, pg
->gtt_map
+ i
);
192 * Init rest of gtt managed by IMG.
194 pfn_base
= page_to_pfn(dev_priv
->scratch_page
);
195 pte
= psb_gtt_mask_pte(pfn_base
, 0);
196 for (; i
< tt_pages
/ 2 - 1; ++i
)
197 iowrite32(pte
, pg
->gtt_map
+ i
);
200 * insert CI stolen pages
203 pfn_base
= dev_priv
->ci_region_start
>> PAGE_SHIFT
;
204 ci_pages
= num_pages
= ci_stolen_size
>> PAGE_SHIFT
;
205 printk(KERN_INFO
"Set up %d CI stolen pages starting at 0x%08x, GTT offset %dK\n",
206 num_pages
, pfn_base
, (ttm_gtt_map
- pg
->gtt_map
) * 4);
207 for (i
= 0; i
< num_pages
; ++i
) {
208 pte
= psb_gtt_mask_pte(pfn_base
+ i
, 0);
209 iowrite32(pte
, ttm_gtt_map
+ i
);
213 * insert RAR stolen pages
215 if (rar_stolen_size
!= 0) {
216 pfn_base
= dev_priv
->rar_region_start
>> PAGE_SHIFT
;
217 num_pages
= rar_stolen_size
>> PAGE_SHIFT
;
218 printk(KERN_INFO
"Set up %d RAR stolen pages starting at 0x%08x, GTT offset %dK\n",
220 (ttm_gtt_map
- pg
->gtt_map
+ i
) * 4);
221 for (; i
< num_pages
+ ci_pages
; ++i
) {
222 pte
= psb_gtt_mask_pte(pfn_base
+ i
- ci_pages
, 0);
223 iowrite32(pte
, ttm_gtt_map
+ i
);
227 * Init rest of gtt managed by TTM.
230 pfn_base
= page_to_pfn(dev_priv
->scratch_page
);
231 pte
= psb_gtt_mask_pte(pfn_base
, 0);
232 PSB_DEBUG_INIT("Initializing the rest of a total "
233 "of %d gtt pages.\n", pg
->gatt_pages
);
235 for (; i
< pg
->gatt_pages
- tt_pages
/ 2; ++i
)
236 iowrite32(pte
, ttm_gtt_map
+ i
);
237 (void) ioread32(pg
->gtt_map
+ i
- 1);
242 psb_gtt_takedown(pg
, 0);
246 int psb_gtt_insert_pages(struct psb_gtt
*pg
, struct page
**pages
,
247 unsigned offset_pages
, unsigned num_pages
,
248 unsigned desired_tile_stride
,
249 unsigned hw_tile_stride
, int type
)
256 uint32_t *cur_page
= NULL
;
260 rows
= num_pages
/ desired_tile_stride
;
262 desired_tile_stride
= num_pages
;
264 add
= desired_tile_stride
;
265 row_add
= hw_tile_stride
;
268 for (i
= 0; i
< rows
; ++i
) {
269 cur_page
= pg
->gtt_map
+ offset_pages
;
270 for (j
= 0; j
< desired_tile_stride
; ++j
) {
272 psb_gtt_mask_pte(page_to_pfn(*pages
++), type
);
273 iowrite32(pte
, cur_page
++);
277 (void) ioread32(cur_page
- 1);
283 int psb_gtt_insert_phys_addresses(struct psb_gtt
*pg
, dma_addr_t
*pPhysFrames
,
284 unsigned offset_pages
, unsigned num_pages
, int type
)
287 uint32_t *cur_page
= NULL
;
292 cur_page
= pg
->gtt_map
+ offset_pages
;
293 for (j
= 0; j
< num_pages
; ++j
) {
295 pte
= psb_gtt_mask_pte(ba
>> PAGE_SHIFT
, type
);
296 iowrite32(pte
, cur_page
++);
298 (void) ioread32(cur_page
- 1);
303 int psb_gtt_remove_pages(struct psb_gtt
*pg
, unsigned offset_pages
,
304 unsigned num_pages
, unsigned desired_tile_stride
,
305 unsigned hw_tile_stride
, int rc_prot
)
307 struct drm_psb_private
*dev_priv
= pg
->dev
->dev_private
;
313 uint32_t *cur_page
= NULL
;
314 unsigned pfn_base
= page_to_pfn(dev_priv
->scratch_page
);
315 uint32_t pte
= psb_gtt_mask_pte(pfn_base
, 0);
318 rows
= num_pages
/ desired_tile_stride
;
320 desired_tile_stride
= num_pages
;
322 add
= desired_tile_stride
;
323 row_add
= hw_tile_stride
;
327 for (i
= 0; i
< rows
; ++i
) {
328 cur_page
= pg
->gtt_map
+ offset_pages
;
329 for (j
= 0; j
< desired_tile_stride
; ++j
)
330 iowrite32(pte
, cur_page
++);
334 (void) ioread32(cur_page
- 1);
341 int psb_gtt_mm_init(struct psb_gtt
*pg
)
343 struct psb_gtt_mm
*gtt_mm
;
344 struct drm_psb_private
*dev_priv
= pg
->dev
->dev_private
;
345 struct drm_open_hash
*ht
;
351 if (!pg
|| !pg
->initialized
) {
352 DRM_DEBUG("Invalid gtt struct\n");
356 gtt_mm
= kzalloc(sizeof(struct psb_gtt_mm
), GFP_KERNEL
);
360 spin_lock_init(>t_mm
->lock
);
363 ret
= drm_ht_create(ht
, 20);
365 DRM_DEBUG("Create hash table failed(%d)\n", ret
);
369 tt_start
= (pg
->stolen_size
+ PAGE_SIZE
- 1) >> PAGE_SHIFT
;
370 tt_start
= (tt_start
< pg
->gatt_pages
) ? tt_start
: pg
->gatt_pages
;
371 tt_size
= (pg
->gatt_pages
< PSB_TT_PRIV0_PLIMIT
) ?
372 (pg
->gatt_pages
) : PSB_TT_PRIV0_PLIMIT
;
376 /*will use tt_start ~ 128M for IMG TT buffers*/
377 ret
= drm_mm_init(mm
, tt_start
, ((tt_size
/ 2) - tt_start
));
379 DRM_DEBUG("drm_mm_int error(%d)\n", ret
);
385 dev_priv
->gtt_mm
= gtt_mm
;
387 DRM_INFO("PSB GTT mem manager ready, tt_start %ld, tt_size %ld pages\n",
388 (unsigned long)tt_start
,
389 (unsigned long)((tt_size
/ 2) - tt_start
));
/**
 * Delete all hash entries;
 */
void psb_gtt_mm_takedown(void)
{
	/* NOTE(review): intentionally a stub — per-tgid hash entries are
	 * freed lazily in psb_gtt_remove_node() when their mapping count
	 * drops to zero, so there is nothing to do here yet. */
	return;
}
407 static int psb_gtt_mm_get_ht_by_pid_locked(struct psb_gtt_mm
*mm
,
409 struct psb_gtt_hash_entry
**hentry
)
411 struct drm_hash_item
*entry
;
412 struct psb_gtt_hash_entry
*psb_entry
;
415 ret
= drm_ht_find_item(&mm
->hash
, tgid
, &entry
);
417 DRM_DEBUG("Cannot find entry pid=%ld\n", tgid
);
421 psb_entry
= container_of(entry
, struct psb_gtt_hash_entry
, item
);
423 DRM_DEBUG("Invalid entry");
432 static int psb_gtt_mm_insert_ht_locked(struct psb_gtt_mm
*mm
,
434 struct psb_gtt_hash_entry
*hentry
)
436 struct drm_hash_item
*item
;
440 DRM_DEBUG("Invalid parameters\n");
444 item
= &hentry
->item
;
448 * NOTE: drm_ht_insert_item will perform such a check
449 ret = psb_gtt_mm_get_ht_by_pid(mm, tgid, &tmp);
451 DRM_DEBUG("Entry already exists for pid %ld\n", tgid);
456 /*Insert the given entry*/
457 ret
= drm_ht_insert_item(&mm
->hash
, item
);
459 DRM_DEBUG("Insert failure\n");
468 static int psb_gtt_mm_alloc_insert_ht(struct psb_gtt_mm
*mm
,
470 struct psb_gtt_hash_entry
**entry
)
472 struct psb_gtt_hash_entry
*hentry
;
475 /*if the hentry for this tgid exists, just get it and return*/
476 spin_lock(&mm
->lock
);
477 ret
= psb_gtt_mm_get_ht_by_pid_locked(mm
, tgid
, &hentry
);
479 DRM_DEBUG("Entry for tgid %ld exist, hentry %p\n",
482 spin_unlock(&mm
->lock
);
485 spin_unlock(&mm
->lock
);
487 DRM_DEBUG("Entry for tgid %ld doesn't exist, will create it\n", tgid
);
489 hentry
= kzalloc(sizeof(struct psb_gtt_hash_entry
), GFP_KERNEL
);
491 DRM_DEBUG("Kmalloc failled\n");
495 ret
= drm_ht_create(&hentry
->ht
, 20);
497 DRM_DEBUG("Create hash table failed\n");
501 spin_lock(&mm
->lock
);
502 ret
= psb_gtt_mm_insert_ht_locked(mm
, tgid
, hentry
);
503 spin_unlock(&mm
->lock
);
511 static struct psb_gtt_hash_entry
*
512 psb_gtt_mm_remove_ht_locked(struct psb_gtt_mm
*mm
, u32 tgid
)
514 struct psb_gtt_hash_entry
*tmp
;
517 ret
= psb_gtt_mm_get_ht_by_pid_locked(mm
, tgid
, &tmp
);
519 DRM_DEBUG("Cannot find entry pid %ld\n", tgid
);
523 /*remove it from ht*/
524 drm_ht_remove_item(&mm
->hash
, &tmp
->item
);
531 static int psb_gtt_mm_remove_free_ht_locked(struct psb_gtt_mm
*mm
, u32 tgid
)
533 struct psb_gtt_hash_entry
*entry
;
535 entry
= psb_gtt_mm_remove_ht_locked(mm
, tgid
);
538 DRM_DEBUG("Invalid entry");
543 drm_ht_remove(&entry
->ht
);
551 psb_gtt_mm_get_mem_mapping_locked(struct drm_open_hash
*ht
,
553 struct psb_gtt_mem_mapping
**hentry
)
555 struct drm_hash_item
*entry
;
556 struct psb_gtt_mem_mapping
*mapping
;
559 ret
= drm_ht_find_item(ht
, key
, &entry
);
561 DRM_DEBUG("Cannot find key %ld\n", key
);
565 mapping
= container_of(entry
, struct psb_gtt_mem_mapping
, item
);
567 DRM_DEBUG("Invalid entry\n");
576 psb_gtt_mm_insert_mem_mapping_locked(struct drm_open_hash
*ht
,
578 struct psb_gtt_mem_mapping
*hentry
)
580 struct drm_hash_item
*item
;
581 struct psb_gtt_hash_entry
*entry
;
585 DRM_DEBUG("hentry is NULL\n");
589 item
= &hentry
->item
;
592 ret
= drm_ht_insert_item(ht
, item
);
594 DRM_DEBUG("insert_item failed\n");
598 entry
= container_of(ht
, struct psb_gtt_hash_entry
, ht
);
606 psb_gtt_mm_alloc_insert_mem_mapping(struct psb_gtt_mm
*mm
,
607 struct drm_open_hash
*ht
,
609 struct drm_mm_node
*node
,
610 struct psb_gtt_mem_mapping
**entry
)
612 struct psb_gtt_mem_mapping
*mapping
;
616 DRM_DEBUG("parameter error\n");
620 /*try to get this mem_map */
621 spin_lock(&mm
->lock
);
622 ret
= psb_gtt_mm_get_mem_mapping_locked(ht
, key
, &mapping
);
624 DRM_DEBUG("mapping entry for key %ld exists, entry %p\n",
627 spin_unlock(&mm
->lock
);
630 spin_unlock(&mm
->lock
);
632 DRM_DEBUG("Mapping entry for key %ld doesn't exist, will create it\n",
635 mapping
= kzalloc(sizeof(struct psb_gtt_mem_mapping
), GFP_KERNEL
);
637 DRM_DEBUG("kmalloc failed\n");
641 mapping
->node
= node
;
643 spin_lock(&mm
->lock
);
644 ret
= psb_gtt_mm_insert_mem_mapping_locked(ht
, key
, mapping
);
645 spin_unlock(&mm
->lock
);
653 static struct psb_gtt_mem_mapping
*
654 psb_gtt_mm_remove_mem_mapping_locked(struct drm_open_hash
*ht
, u32 key
)
656 struct psb_gtt_mem_mapping
*tmp
;
657 struct psb_gtt_hash_entry
*entry
;
660 ret
= psb_gtt_mm_get_mem_mapping_locked(ht
, key
, &tmp
);
662 DRM_DEBUG("Cannot find key %ld\n", key
);
666 drm_ht_remove_item(ht
, &tmp
->item
);
668 entry
= container_of(ht
, struct psb_gtt_hash_entry
, ht
);
675 static int psb_gtt_mm_remove_free_mem_mapping_locked(struct drm_open_hash
*ht
,
677 struct drm_mm_node
**node
)
679 struct psb_gtt_mem_mapping
*entry
;
681 entry
= psb_gtt_mm_remove_mem_mapping_locked(ht
, key
);
683 DRM_DEBUG("entry is NULL\n");
693 static int psb_gtt_add_node(struct psb_gtt_mm
*mm
,
696 struct drm_mm_node
*node
,
697 struct psb_gtt_mem_mapping
**entry
)
699 struct psb_gtt_hash_entry
*hentry
;
700 struct psb_gtt_mem_mapping
*mapping
;
703 ret
= psb_gtt_mm_alloc_insert_ht(mm
, tgid
, &hentry
);
705 DRM_DEBUG("alloc_insert failed\n");
709 ret
= psb_gtt_mm_alloc_insert_mem_mapping(mm
,
715 DRM_DEBUG("mapping alloc_insert failed\n");
724 static int psb_gtt_remove_node(struct psb_gtt_mm
*mm
,
727 struct drm_mm_node
**node
)
729 struct psb_gtt_hash_entry
*hentry
;
730 struct drm_mm_node
*tmp
;
733 spin_lock(&mm
->lock
);
734 ret
= psb_gtt_mm_get_ht_by_pid_locked(mm
, tgid
, &hentry
);
736 DRM_DEBUG("Cannot find entry for pid %ld\n", tgid
);
737 spin_unlock(&mm
->lock
);
740 spin_unlock(&mm
->lock
);
742 /*remove mapping entry*/
743 spin_lock(&mm
->lock
);
744 ret
= psb_gtt_mm_remove_free_mem_mapping_locked(&hentry
->ht
,
748 DRM_DEBUG("remove_free failed\n");
749 spin_unlock(&mm
->lock
);
755 /*check the count of mapping entry*/
756 if (!hentry
->count
) {
757 DRM_DEBUG("count of mapping entry is zero, tgid=%ld\n", tgid
);
758 psb_gtt_mm_remove_free_ht_locked(mm
, tgid
);
761 spin_unlock(&mm
->lock
);
766 static int psb_gtt_mm_alloc_mem(struct psb_gtt_mm
*mm
,
769 struct drm_mm_node
**node
)
771 struct drm_mm_node
*tmp_node
;
775 ret
= drm_mm_pre_get(&mm
->base
);
777 DRM_DEBUG("drm_mm_pre_get error\n");
781 spin_lock(&mm
->lock
);
782 tmp_node
= drm_mm_search_free(&mm
->base
, pages
, align
, 1);
783 if (unlikely(!tmp_node
)) {
784 DRM_DEBUG("No free node found\n");
785 spin_unlock(&mm
->lock
);
789 tmp_node
= drm_mm_get_block_atomic(tmp_node
, pages
, align
);
790 spin_unlock(&mm
->lock
);
794 DRM_DEBUG("Node allocation failed\n");
802 static void psb_gtt_mm_free_mem(struct psb_gtt_mm
*mm
, struct drm_mm_node
*node
)
804 spin_lock(&mm
->lock
);
805 drm_mm_put_block(node
);
806 spin_unlock(&mm
->lock
);
809 int psb_gtt_map_meminfo(struct drm_device
*dev
,
810 void *hKernelMemInfo
,
816 struct drm_psb_private
*dev_priv
817 = (struct drm_psb_private
*)dev
->dev_private
;
818 void *psKernelMemInfo
;
819 struct psb_gtt_mm
*mm
= dev_priv
->gtt_mm
;
820 struct psb_gtt
*pg
= dev_priv
->pg
;
821 uint32_t size
, pages
, offset_pages
;
823 struct drm_mm_node
*node
;
824 struct page
**page_list
;
825 struct psb_gtt_mem_mapping
*mapping
= NULL
;
828 ret
= psb_get_meminfo_by_handle(hKernelMemInfo
, &psKernelMemInfo
);
830 DRM_DEBUG("Cannot find kernelMemInfo handle %ld\n",
835 DRM_DEBUG("Got psKernelMemInfo %p for handle %lx\n",
836 psKernelMemInfo
, (u32
)hKernelMemInfo
);
837 size
= psKernelMemInfo
->ui32AllocSize
;
838 kmem
= psKernelMemInfo
->pvLinAddrKM
;
839 pages
= (size
+ PAGE_SIZE
- 1) >> PAGE_SHIFT
;
841 DRM_DEBUG("KerMemInfo size %ld, cpuVadr %lx, pages %ld, osMemHdl %lx\n",
842 size
, kmem
, pages
, psKernelMemInfo
->sMemBlk
.hOSMemHandle
);
845 DRM_DEBUG("kmem is NULL");
848 ret
= psb_get_pages_by_mem_handle(psKernelMemInfo
->sMemBlk
.hOSMemHandle
,
851 DRM_DEBUG("get pages error\n");
855 DRM_DEBUG("get %ld pages\n", pages
);
857 /*alloc memory in TT apeture*/
858 ret
= psb_gtt_mm_alloc_mem(mm
, pages
, 0, &node
);
860 DRM_DEBUG("alloc TT memory error\n");
861 goto failed_pages_alloc
;
864 /*update psb_gtt_mm*/
865 ret
= psb_gtt_add_node(mm
,
866 task_tgid_nr(current
),
871 DRM_DEBUG("add_node failed");
872 goto failed_add_node
;
875 node
= mapping
->node
;
876 offset_pages
= node
->start
;
878 DRM_DEBUG("get free node for %ld pages, offset %ld pages",
879 pages
, offset_pages
);
882 psb_gtt_insert_pages(pg
, page_list
,
883 (unsigned)offset_pages
,
889 *offset
= offset_pages
;
893 psb_gtt_mm_free_mem(mm
, node
);
900 int psb_gtt_unmap_meminfo(struct drm_device
*dev
, void * hKernelMemInfo
)
902 struct drm_psb_private
*dev_priv
903 = (struct drm_psb_private
*)dev
->dev_private
;
904 struct psb_gtt_mm
*mm
= dev_priv
->gtt_mm
;
905 struct psb_gtt
*pg
= dev_priv
->pg
;
906 uint32_t pages
, offset_pages
;
907 struct drm_mm_node
*node
;
910 ret
= psb_gtt_remove_node(mm
,
911 task_tgid_nr(current
),
915 DRM_DEBUG("remove node failed\n");
919 /*remove gtt entries*/
920 offset_pages
= node
->start
;
923 psb_gtt_remove_pages(pg
, offset_pages
, pages
, 0, 0, 1);
928 psb_gtt_mm_free_mem(mm
, node
);
932 int psb_gtt_map_meminfo_ioctl(struct drm_device
*dev
, void *data
,
933 struct drm_file
*file_priv
)
935 struct psb_gtt_mapping_arg
*arg
936 = (struct psb_gtt_mapping_arg
*)data
;
937 uint32_t *offset_pages
= &arg
->offset_pages
;
941 return psb_gtt_map_meminfo(dev
, arg
->hKernelMemInfo
, offset_pages
);
944 int psb_gtt_unmap_meminfo_ioctl(struct drm_device
*dev
, void *data
,
945 struct drm_file
*file_priv
)
948 struct psb_gtt_mapping_arg
*arg
949 = (struct psb_gtt_mapping_arg
*)data
;
953 return psb_gtt_unmap_meminfo(dev
, arg
->hKernelMemInfo
);
956 int psb_gtt_map_pvr_memory(struct drm_device
*dev
, unsigned int hHandle
,
957 unsigned int ui32TaskId
, dma_addr_t
*pPages
,
958 unsigned int ui32PagesNum
, unsigned int *ui32Offset
)
960 struct drm_psb_private
*dev_priv
= dev
->dev_private
;
961 struct psb_gtt_mm
*mm
= dev_priv
->gtt_mm
;
962 struct psb_gtt
*pg
= dev_priv
->pg
;
963 uint32_t size
, pages
, offset_pages
;
964 struct drm_mm_node
*node
= NULL
;
965 struct psb_gtt_mem_mapping
*mapping
= NULL
;
968 size
= ui32PagesNum
* PAGE_SIZE
;
971 /*alloc memory in TT apeture*/
972 ret
= psb_gtt_mm_alloc_mem(mm
, ui32PagesNum
, 0, &node
);
974 DRM_DEBUG("alloc TT memory error\n");
975 goto failed_pages_alloc
;
978 /*update psb_gtt_mm*/
979 ret
= psb_gtt_add_node(mm
,
985 DRM_DEBUG("add_node failed");
986 goto failed_add_node
;
989 node
= mapping
->node
;
990 offset_pages
= node
->start
;
992 DRM_DEBUG("get free node for %ld pages, offset %ld pages",
993 pages
, offset_pages
);
996 psb_gtt_insert_phys_addresses(pg
, pPages
, (unsigned)offset_pages
,
997 (unsigned)ui32PagesNum
, 0);
999 *ui32Offset
= offset_pages
;
1003 psb_gtt_mm_free_mem(mm
, node
);
1009 int psb_gtt_unmap_pvr_memory(struct drm_device
*dev
, unsigned int hHandle
,
1010 unsigned int ui32TaskId
)
1012 struct drm_psb_private
*dev_priv
= dev
->dev_private
;
1013 struct psb_gtt_mm
*mm
= dev_priv
->gtt_mm
;
1014 struct psb_gtt
*pg
= dev_priv
->pg
;
1015 uint32_t pages
, offset_pages
;
1016 struct drm_mm_node
*node
;
1019 ret
= psb_gtt_remove_node(mm
, (u32
)ui32TaskId
, (u32
)hHandle
, &node
);
1021 printk(KERN_ERR
"remove node failed\n");
1025 /*remove gtt entries*/
1026 offset_pages
= node
->start
;
1029 psb_gtt_remove_pages(pg
, offset_pages
, pages
, 0, 0, 1);
1032 psb_gtt_mm_free_mem(mm
, node
);