ASoC: Fix Blackfin I2S _pointer() implementation return in bounds values
[linux-2.6/linux-acpi-2.6/ibm-acpi-2.6.git] / drivers / staging / gma500 / psb_gtt.c
blob53c1e1ed3bd21cd4f31929f95e330410d6af9589
1 /*
2 * Copyright (c) 2007, Intel Corporation.
3 * All Rights Reserved.
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18 * Authors: Thomas Hellstrom <thomas-at-tungstengraphics.com>
21 #include <drm/drmP.h>
22 #include "psb_drv.h"
23 #include "psb_pvr_glue.h"
25 static inline uint32_t psb_gtt_mask_pte(uint32_t pfn, int type)
27 uint32_t mask = PSB_PTE_VALID;
29 if (type & PSB_MMU_CACHED_MEMORY)
30 mask |= PSB_PTE_CACHED;
31 if (type & PSB_MMU_RO_MEMORY)
32 mask |= PSB_PTE_RO;
33 if (type & PSB_MMU_WO_MEMORY)
34 mask |= PSB_PTE_WO;
36 return (pfn << PAGE_SHIFT) | mask;
39 struct psb_gtt *psb_gtt_alloc(struct drm_device *dev)
41 struct psb_gtt *tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
43 if (!tmp)
44 return NULL;
46 init_rwsem(&tmp->sem);
47 tmp->dev = dev;
49 return tmp;
52 void psb_gtt_takedown(struct psb_gtt *pg, int free)
54 struct drm_psb_private *dev_priv = pg->dev->dev_private;
56 if (!pg)
57 return;
59 if (pg->gtt_map) {
60 iounmap(pg->gtt_map);
61 pg->gtt_map = NULL;
63 if (pg->initialized) {
64 pci_write_config_word(pg->dev->pdev, PSB_GMCH_CTRL,
65 pg->gmch_ctrl);
66 PSB_WVDC32(pg->pge_ctl, PSB_PGETBL_CTL);
67 (void) PSB_RVDC32(PSB_PGETBL_CTL);
69 if (free)
70 kfree(pg);
73 int psb_gtt_init(struct psb_gtt *pg, int resume)
75 struct drm_device *dev = pg->dev;
76 struct drm_psb_private *dev_priv = dev->dev_private;
77 unsigned gtt_pages;
78 unsigned long stolen_size, vram_stolen_size, ci_stolen_size;
79 unsigned long rar_stolen_size;
80 unsigned i, num_pages;
81 unsigned pfn_base;
82 uint32_t ci_pages, vram_pages;
83 uint32_t tt_pages;
84 uint32_t *ttm_gtt_map;
85 uint32_t dvmt_mode = 0;
87 int ret = 0;
88 uint32_t pte;
90 pci_read_config_word(dev->pdev, PSB_GMCH_CTRL, &pg->gmch_ctrl);
91 pci_write_config_word(dev->pdev, PSB_GMCH_CTRL,
92 pg->gmch_ctrl | _PSB_GMCH_ENABLED);
94 pg->pge_ctl = PSB_RVDC32(PSB_PGETBL_CTL);
95 PSB_WVDC32(pg->pge_ctl | _PSB_PGETBL_ENABLED, PSB_PGETBL_CTL);
96 (void) PSB_RVDC32(PSB_PGETBL_CTL);
98 pg->initialized = 1;
100 pg->gtt_phys_start = pg->pge_ctl & PAGE_MASK;
102 pg->gatt_start = pci_resource_start(dev->pdev, PSB_GATT_RESOURCE);
103 /* fix me: video mmu has hw bug to access 0x0D0000000,
104 * then make gatt start at 0x0e000,0000 */
105 pg->mmu_gatt_start = PSB_MEM_TT_START;
106 pg->gtt_start = pci_resource_start(dev->pdev, PSB_GTT_RESOURCE);
107 gtt_pages =
108 pci_resource_len(dev->pdev, PSB_GTT_RESOURCE) >> PAGE_SHIFT;
109 pg->gatt_pages = pci_resource_len(dev->pdev, PSB_GATT_RESOURCE)
110 >> PAGE_SHIFT;
112 pci_read_config_dword(dev->pdev, PSB_BSM, &pg->stolen_base);
113 vram_stolen_size = pg->gtt_phys_start - pg->stolen_base - PAGE_SIZE;
115 /* CI is not included in the stolen size since the TOPAZ MMU bug */
116 ci_stolen_size = dev_priv->ci_region_size;
117 /* Don't add CI & RAR share buffer space
118 * managed by TTM to stolen_size */
119 stolen_size = vram_stolen_size;
121 rar_stolen_size = dev_priv->rar_region_size;
123 printk(KERN_INFO"GMMADR(region 0) start: 0x%08x (%dM).\n",
124 pg->gatt_start, pg->gatt_pages/256);
125 printk(KERN_INFO"GTTADR(region 3) start: 0x%08x (can map %dM RAM), and actual RAM base 0x%08x.\n",
126 pg->gtt_start, gtt_pages * 4, pg->gtt_phys_start);
127 printk(KERN_INFO "Stole memory information\n");
128 printk(KERN_INFO " base in RAM: 0x%x\n", pg->stolen_base);
129 printk(KERN_INFO " size: %luK, calculated by (GTT RAM base) - (Stolen base), seems wrong\n",
130 vram_stolen_size/1024);
131 dvmt_mode = (pg->gmch_ctrl >> 4) & 0x7;
132 printk(KERN_INFO " the correct size should be: %dM(dvmt mode=%d)\n",
133 (dvmt_mode == 1) ? 1 : (2 << (dvmt_mode - 1)), dvmt_mode);
135 if (ci_stolen_size > 0)
136 printk(KERN_INFO"CI Stole memory: RAM base = 0x%08x, size = %lu M\n",
137 dev_priv->ci_region_start,
138 ci_stolen_size / 1024 / 1024);
139 if (rar_stolen_size > 0)
140 printk(KERN_INFO "RAR Stole memory: RAM base = 0x%08x, size = %lu M\n",
141 dev_priv->rar_region_start,
142 rar_stolen_size / 1024 / 1024);
144 if (resume && (gtt_pages != pg->gtt_pages) &&
145 (stolen_size != pg->stolen_size)) {
146 DRM_ERROR("GTT resume error.\n");
147 ret = -EINVAL;
148 goto out_err;
151 pg->gtt_pages = gtt_pages;
152 pg->stolen_size = stolen_size;
153 pg->vram_stolen_size = vram_stolen_size;
154 pg->ci_stolen_size = ci_stolen_size;
155 pg->rar_stolen_size = rar_stolen_size;
156 pg->gtt_map =
157 ioremap_nocache(pg->gtt_phys_start, gtt_pages << PAGE_SHIFT);
158 if (!pg->gtt_map) {
159 DRM_ERROR("Failure to map gtt.\n");
160 ret = -ENOMEM;
161 goto out_err;
164 pg->vram_addr = ioremap_wc(pg->stolen_base, stolen_size);
165 if (!pg->vram_addr) {
166 DRM_ERROR("Failure to map stolen base.\n");
167 ret = -ENOMEM;
168 goto out_err;
171 DRM_DEBUG("%s: vram kernel virtual address %p\n", pg->vram_addr);
173 tt_pages = (pg->gatt_pages < PSB_TT_PRIV0_PLIMIT) ?
174 (pg->gatt_pages) : PSB_TT_PRIV0_PLIMIT;
176 ttm_gtt_map = pg->gtt_map + tt_pages / 2;
179 * insert vram stolen pages.
182 pfn_base = pg->stolen_base >> PAGE_SHIFT;
183 vram_pages = num_pages = vram_stolen_size >> PAGE_SHIFT;
184 printk(KERN_INFO"Set up %d stolen pages starting at 0x%08x, GTT offset %dK\n",
185 num_pages, pfn_base, 0);
186 for (i = 0; i < num_pages; ++i) {
187 pte = psb_gtt_mask_pte(pfn_base + i, 0);
188 iowrite32(pte, pg->gtt_map + i);
192 * Init rest of gtt managed by IMG.
194 pfn_base = page_to_pfn(dev_priv->scratch_page);
195 pte = psb_gtt_mask_pte(pfn_base, 0);
196 for (; i < tt_pages / 2 - 1; ++i)
197 iowrite32(pte, pg->gtt_map + i);
200 * insert CI stolen pages
203 pfn_base = dev_priv->ci_region_start >> PAGE_SHIFT;
204 ci_pages = num_pages = ci_stolen_size >> PAGE_SHIFT;
205 printk(KERN_INFO"Set up %d CI stolen pages starting at 0x%08x, GTT offset %dK\n",
206 num_pages, pfn_base, (ttm_gtt_map - pg->gtt_map) * 4);
207 for (i = 0; i < num_pages; ++i) {
208 pte = psb_gtt_mask_pte(pfn_base + i, 0);
209 iowrite32(pte, ttm_gtt_map + i);
213 * insert RAR stolen pages
215 if (rar_stolen_size != 0) {
216 pfn_base = dev_priv->rar_region_start >> PAGE_SHIFT;
217 num_pages = rar_stolen_size >> PAGE_SHIFT;
218 printk(KERN_INFO"Set up %d RAR stolen pages starting at 0x%08x, GTT offset %dK\n",
219 num_pages, pfn_base,
220 (ttm_gtt_map - pg->gtt_map + i) * 4);
221 for (; i < num_pages + ci_pages; ++i) {
222 pte = psb_gtt_mask_pte(pfn_base + i - ci_pages, 0);
223 iowrite32(pte, ttm_gtt_map + i);
227 * Init rest of gtt managed by TTM.
230 pfn_base = page_to_pfn(dev_priv->scratch_page);
231 pte = psb_gtt_mask_pte(pfn_base, 0);
232 PSB_DEBUG_INIT("Initializing the rest of a total "
233 "of %d gtt pages.\n", pg->gatt_pages);
235 for (; i < pg->gatt_pages - tt_pages / 2; ++i)
236 iowrite32(pte, ttm_gtt_map + i);
237 (void) ioread32(pg->gtt_map + i - 1);
239 return 0;
241 out_err:
242 psb_gtt_takedown(pg, 0);
243 return ret;
246 int psb_gtt_insert_pages(struct psb_gtt *pg, struct page **pages,
247 unsigned offset_pages, unsigned num_pages,
248 unsigned desired_tile_stride,
249 unsigned hw_tile_stride, int type)
251 unsigned rows = 1;
252 unsigned add;
253 unsigned row_add;
254 unsigned i;
255 unsigned j;
256 uint32_t *cur_page = NULL;
257 uint32_t pte;
259 if (hw_tile_stride)
260 rows = num_pages / desired_tile_stride;
261 else
262 desired_tile_stride = num_pages;
264 add = desired_tile_stride;
265 row_add = hw_tile_stride;
267 down_read(&pg->sem);
268 for (i = 0; i < rows; ++i) {
269 cur_page = pg->gtt_map + offset_pages;
270 for (j = 0; j < desired_tile_stride; ++j) {
271 pte =
272 psb_gtt_mask_pte(page_to_pfn(*pages++), type);
273 iowrite32(pte, cur_page++);
275 offset_pages += add;
277 (void) ioread32(cur_page - 1);
278 up_read(&pg->sem);
280 return 0;
283 int psb_gtt_insert_phys_addresses(struct psb_gtt *pg, dma_addr_t *pPhysFrames,
284 unsigned offset_pages, unsigned num_pages, int type)
286 unsigned j;
287 uint32_t *cur_page = NULL;
288 uint32_t pte;
289 u32 ba;
291 down_read(&pg->sem);
292 cur_page = pg->gtt_map + offset_pages;
293 for (j = 0; j < num_pages; ++j) {
294 ba = *pPhysFrames++;
295 pte = psb_gtt_mask_pte(ba >> PAGE_SHIFT, type);
296 iowrite32(pte, cur_page++);
298 (void) ioread32(cur_page - 1);
299 up_read(&pg->sem);
300 return 0;
303 int psb_gtt_remove_pages(struct psb_gtt *pg, unsigned offset_pages,
304 unsigned num_pages, unsigned desired_tile_stride,
305 unsigned hw_tile_stride, int rc_prot)
307 struct drm_psb_private *dev_priv = pg->dev->dev_private;
308 unsigned rows = 1;
309 unsigned add;
310 unsigned row_add;
311 unsigned i;
312 unsigned j;
313 uint32_t *cur_page = NULL;
314 unsigned pfn_base = page_to_pfn(dev_priv->scratch_page);
315 uint32_t pte = psb_gtt_mask_pte(pfn_base, 0);
317 if (hw_tile_stride)
318 rows = num_pages / desired_tile_stride;
319 else
320 desired_tile_stride = num_pages;
322 add = desired_tile_stride;
323 row_add = hw_tile_stride;
325 if (rc_prot)
326 down_read(&pg->sem);
327 for (i = 0; i < rows; ++i) {
328 cur_page = pg->gtt_map + offset_pages;
329 for (j = 0; j < desired_tile_stride; ++j)
330 iowrite32(pte, cur_page++);
332 offset_pages += add;
334 (void) ioread32(cur_page - 1);
335 if (rc_prot)
336 up_read(&pg->sem);
338 return 0;
341 int psb_gtt_mm_init(struct psb_gtt *pg)
343 struct psb_gtt_mm *gtt_mm;
344 struct drm_psb_private *dev_priv = pg->dev->dev_private;
345 struct drm_open_hash *ht;
346 struct drm_mm *mm;
347 int ret;
348 uint32_t tt_start;
349 uint32_t tt_size;
351 if (!pg || !pg->initialized) {
352 DRM_DEBUG("Invalid gtt struct\n");
353 return -EINVAL;
356 gtt_mm = kzalloc(sizeof(struct psb_gtt_mm), GFP_KERNEL);
357 if (!gtt_mm)
358 return -ENOMEM;
360 spin_lock_init(&gtt_mm->lock);
362 ht = &gtt_mm->hash;
363 ret = drm_ht_create(ht, 20);
364 if (ret) {
365 DRM_DEBUG("Create hash table failed(%d)\n", ret);
366 goto err_free;
369 tt_start = (pg->stolen_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
370 tt_start = (tt_start < pg->gatt_pages) ? tt_start : pg->gatt_pages;
371 tt_size = (pg->gatt_pages < PSB_TT_PRIV0_PLIMIT) ?
372 (pg->gatt_pages) : PSB_TT_PRIV0_PLIMIT;
374 mm = &gtt_mm->base;
376 /*will use tt_start ~ 128M for IMG TT buffers*/
377 ret = drm_mm_init(mm, tt_start, ((tt_size / 2) - tt_start));
378 if (ret) {
379 DRM_DEBUG("drm_mm_int error(%d)\n", ret);
380 goto err_mm_init;
383 gtt_mm->count = 0;
385 dev_priv->gtt_mm = gtt_mm;
387 DRM_INFO("PSB GTT mem manager ready, tt_start %ld, tt_size %ld pages\n",
388 (unsigned long)tt_start,
389 (unsigned long)((tt_size / 2) - tt_start));
390 return 0;
391 err_mm_init:
392 drm_ht_remove(ht);
394 err_free:
395 kfree(gtt_mm);
396 return ret;
/*
 * psb_gtt_mm_takedown - tear down the GTT memory manager
 *
 * Placeholder: hash entries are not deleted here yet.
 */
void psb_gtt_mm_takedown(void)
{
}
407 static int psb_gtt_mm_get_ht_by_pid_locked(struct psb_gtt_mm *mm,
408 u32 tgid,
409 struct psb_gtt_hash_entry **hentry)
411 struct drm_hash_item *entry;
412 struct psb_gtt_hash_entry *psb_entry;
413 int ret;
415 ret = drm_ht_find_item(&mm->hash, tgid, &entry);
416 if (ret) {
417 DRM_DEBUG("Cannot find entry pid=%ld\n", tgid);
418 return ret;
421 psb_entry = container_of(entry, struct psb_gtt_hash_entry, item);
422 if (!psb_entry) {
423 DRM_DEBUG("Invalid entry");
424 return -EINVAL;
427 *hentry = psb_entry;
428 return 0;
432 static int psb_gtt_mm_insert_ht_locked(struct psb_gtt_mm *mm,
433 u32 tgid,
434 struct psb_gtt_hash_entry *hentry)
436 struct drm_hash_item *item;
437 int ret;
439 if (!hentry) {
440 DRM_DEBUG("Invalid parameters\n");
441 return -EINVAL;
444 item = &hentry->item;
445 item->key = tgid;
448 * NOTE: drm_ht_insert_item will perform such a check
449 ret = psb_gtt_mm_get_ht_by_pid(mm, tgid, &tmp);
450 if (!ret) {
451 DRM_DEBUG("Entry already exists for pid %ld\n", tgid);
452 return -EAGAIN;
456 /*Insert the given entry*/
457 ret = drm_ht_insert_item(&mm->hash, item);
458 if (ret) {
459 DRM_DEBUG("Insert failure\n");
460 return ret;
463 mm->count++;
465 return 0;
468 static int psb_gtt_mm_alloc_insert_ht(struct psb_gtt_mm *mm,
469 u32 tgid,
470 struct psb_gtt_hash_entry **entry)
472 struct psb_gtt_hash_entry *hentry;
473 int ret;
475 /*if the hentry for this tgid exists, just get it and return*/
476 spin_lock(&mm->lock);
477 ret = psb_gtt_mm_get_ht_by_pid_locked(mm, tgid, &hentry);
478 if (!ret) {
479 DRM_DEBUG("Entry for tgid %ld exist, hentry %p\n",
480 tgid, hentry);
481 *entry = hentry;
482 spin_unlock(&mm->lock);
483 return 0;
485 spin_unlock(&mm->lock);
487 DRM_DEBUG("Entry for tgid %ld doesn't exist, will create it\n", tgid);
489 hentry = kzalloc(sizeof(struct psb_gtt_hash_entry), GFP_KERNEL);
490 if (!hentry) {
491 DRM_DEBUG("Kmalloc failled\n");
492 return -ENOMEM;
495 ret = drm_ht_create(&hentry->ht, 20);
496 if (ret) {
497 DRM_DEBUG("Create hash table failed\n");
498 return ret;
501 spin_lock(&mm->lock);
502 ret = psb_gtt_mm_insert_ht_locked(mm, tgid, hentry);
503 spin_unlock(&mm->lock);
505 if (!ret)
506 *entry = hentry;
508 return ret;
511 static struct psb_gtt_hash_entry *
512 psb_gtt_mm_remove_ht_locked(struct psb_gtt_mm *mm, u32 tgid)
514 struct psb_gtt_hash_entry *tmp;
515 int ret;
517 ret = psb_gtt_mm_get_ht_by_pid_locked(mm, tgid, &tmp);
518 if (ret) {
519 DRM_DEBUG("Cannot find entry pid %ld\n", tgid);
520 return NULL;
523 /*remove it from ht*/
524 drm_ht_remove_item(&mm->hash, &tmp->item);
526 mm->count--;
528 return tmp;
531 static int psb_gtt_mm_remove_free_ht_locked(struct psb_gtt_mm *mm, u32 tgid)
533 struct psb_gtt_hash_entry *entry;
535 entry = psb_gtt_mm_remove_ht_locked(mm, tgid);
537 if (!entry) {
538 DRM_DEBUG("Invalid entry");
539 return -EINVAL;
542 /*delete ht*/
543 drm_ht_remove(&entry->ht);
545 /*free this entry*/
546 kfree(entry);
547 return 0;
550 static int
551 psb_gtt_mm_get_mem_mapping_locked(struct drm_open_hash *ht,
552 u32 key,
553 struct psb_gtt_mem_mapping **hentry)
555 struct drm_hash_item *entry;
556 struct psb_gtt_mem_mapping *mapping;
557 int ret;
559 ret = drm_ht_find_item(ht, key, &entry);
560 if (ret) {
561 DRM_DEBUG("Cannot find key %ld\n", key);
562 return ret;
565 mapping = container_of(entry, struct psb_gtt_mem_mapping, item);
566 if (!mapping) {
567 DRM_DEBUG("Invalid entry\n");
568 return -EINVAL;
571 *hentry = mapping;
572 return 0;
575 static int
576 psb_gtt_mm_insert_mem_mapping_locked(struct drm_open_hash *ht,
577 u32 key,
578 struct psb_gtt_mem_mapping *hentry)
580 struct drm_hash_item *item;
581 struct psb_gtt_hash_entry *entry;
582 int ret;
584 if (!hentry) {
585 DRM_DEBUG("hentry is NULL\n");
586 return -EINVAL;
589 item = &hentry->item;
590 item->key = key;
592 ret = drm_ht_insert_item(ht, item);
593 if (ret) {
594 DRM_DEBUG("insert_item failed\n");
595 return ret;
598 entry = container_of(ht, struct psb_gtt_hash_entry, ht);
599 if (entry)
600 entry->count++;
602 return 0;
605 static int
606 psb_gtt_mm_alloc_insert_mem_mapping(struct psb_gtt_mm *mm,
607 struct drm_open_hash *ht,
608 u32 key,
609 struct drm_mm_node *node,
610 struct psb_gtt_mem_mapping **entry)
612 struct psb_gtt_mem_mapping *mapping;
613 int ret;
615 if (!node || !ht) {
616 DRM_DEBUG("parameter error\n");
617 return -EINVAL;
620 /*try to get this mem_map */
621 spin_lock(&mm->lock);
622 ret = psb_gtt_mm_get_mem_mapping_locked(ht, key, &mapping);
623 if (!ret) {
624 DRM_DEBUG("mapping entry for key %ld exists, entry %p\n",
625 key, mapping);
626 *entry = mapping;
627 spin_unlock(&mm->lock);
628 return 0;
630 spin_unlock(&mm->lock);
632 DRM_DEBUG("Mapping entry for key %ld doesn't exist, will create it\n",
633 key);
635 mapping = kzalloc(sizeof(struct psb_gtt_mem_mapping), GFP_KERNEL);
636 if (!mapping) {
637 DRM_DEBUG("kmalloc failed\n");
638 return -ENOMEM;
641 mapping->node = node;
643 spin_lock(&mm->lock);
644 ret = psb_gtt_mm_insert_mem_mapping_locked(ht, key, mapping);
645 spin_unlock(&mm->lock);
647 if (!ret)
648 *entry = mapping;
650 return ret;
653 static struct psb_gtt_mem_mapping *
654 psb_gtt_mm_remove_mem_mapping_locked(struct drm_open_hash *ht, u32 key)
656 struct psb_gtt_mem_mapping *tmp;
657 struct psb_gtt_hash_entry *entry;
658 int ret;
660 ret = psb_gtt_mm_get_mem_mapping_locked(ht, key, &tmp);
661 if (ret) {
662 DRM_DEBUG("Cannot find key %ld\n", key);
663 return NULL;
666 drm_ht_remove_item(ht, &tmp->item);
668 entry = container_of(ht, struct psb_gtt_hash_entry, ht);
669 if (entry)
670 entry->count--;
672 return tmp;
675 static int psb_gtt_mm_remove_free_mem_mapping_locked(struct drm_open_hash *ht,
676 u32 key,
677 struct drm_mm_node **node)
679 struct psb_gtt_mem_mapping *entry;
681 entry = psb_gtt_mm_remove_mem_mapping_locked(ht, key);
682 if (!entry) {
683 DRM_DEBUG("entry is NULL\n");
684 return -EINVAL;
687 *node = entry->node;
689 kfree(entry);
690 return 0;
693 static int psb_gtt_add_node(struct psb_gtt_mm *mm,
694 u32 tgid,
695 u32 key,
696 struct drm_mm_node *node,
697 struct psb_gtt_mem_mapping **entry)
699 struct psb_gtt_hash_entry *hentry;
700 struct psb_gtt_mem_mapping *mapping;
701 int ret;
703 ret = psb_gtt_mm_alloc_insert_ht(mm, tgid, &hentry);
704 if (ret) {
705 DRM_DEBUG("alloc_insert failed\n");
706 return ret;
709 ret = psb_gtt_mm_alloc_insert_mem_mapping(mm,
710 &hentry->ht,
711 key,
712 node,
713 &mapping);
714 if (ret) {
715 DRM_DEBUG("mapping alloc_insert failed\n");
716 return ret;
719 *entry = mapping;
721 return 0;
724 static int psb_gtt_remove_node(struct psb_gtt_mm *mm,
725 u32 tgid,
726 u32 key,
727 struct drm_mm_node **node)
729 struct psb_gtt_hash_entry *hentry;
730 struct drm_mm_node *tmp;
731 int ret;
733 spin_lock(&mm->lock);
734 ret = psb_gtt_mm_get_ht_by_pid_locked(mm, tgid, &hentry);
735 if (ret) {
736 DRM_DEBUG("Cannot find entry for pid %ld\n", tgid);
737 spin_unlock(&mm->lock);
738 return ret;
740 spin_unlock(&mm->lock);
742 /*remove mapping entry*/
743 spin_lock(&mm->lock);
744 ret = psb_gtt_mm_remove_free_mem_mapping_locked(&hentry->ht,
745 key,
746 &tmp);
747 if (ret) {
748 DRM_DEBUG("remove_free failed\n");
749 spin_unlock(&mm->lock);
750 return ret;
753 *node = tmp;
755 /*check the count of mapping entry*/
756 if (!hentry->count) {
757 DRM_DEBUG("count of mapping entry is zero, tgid=%ld\n", tgid);
758 psb_gtt_mm_remove_free_ht_locked(mm, tgid);
761 spin_unlock(&mm->lock);
763 return 0;
766 static int psb_gtt_mm_alloc_mem(struct psb_gtt_mm *mm,
767 uint32_t pages,
768 uint32_t align,
769 struct drm_mm_node **node)
771 struct drm_mm_node *tmp_node;
772 int ret;
774 do {
775 ret = drm_mm_pre_get(&mm->base);
776 if (unlikely(ret)) {
777 DRM_DEBUG("drm_mm_pre_get error\n");
778 return ret;
781 spin_lock(&mm->lock);
782 tmp_node = drm_mm_search_free(&mm->base, pages, align, 1);
783 if (unlikely(!tmp_node)) {
784 DRM_DEBUG("No free node found\n");
785 spin_unlock(&mm->lock);
786 break;
789 tmp_node = drm_mm_get_block_atomic(tmp_node, pages, align);
790 spin_unlock(&mm->lock);
791 } while (!tmp_node);
793 if (!tmp_node) {
794 DRM_DEBUG("Node allocation failed\n");
795 return -ENOMEM;
798 *node = tmp_node;
799 return 0;
802 static void psb_gtt_mm_free_mem(struct psb_gtt_mm *mm, struct drm_mm_node *node)
804 spin_lock(&mm->lock);
805 drm_mm_put_block(node);
806 spin_unlock(&mm->lock);
/*
 * psb_gtt_map_meminfo - map a PVR kernel meminfo handle into the GTT
 * @dev:            DRM device
 * @hKernelMemInfo: opaque PVR services handle
 * @offset:         out: GTT page offset of the mapping
 *
 * Currently disabled: always returns -EINVAL. The previous PVR-services
 * based implementation is preserved under #if 0 for reference while the
 * code is reworked (see the FIXMEAC marker).
 */
int psb_gtt_map_meminfo(struct drm_device *dev,
			void *hKernelMemInfo,
			uint32_t *offset)
{
	return -EINVAL;
	/* FIXMEAC */
#if 0
	struct drm_psb_private *dev_priv
		= (struct drm_psb_private *)dev->dev_private;
	void *psKernelMemInfo;
	struct psb_gtt_mm *mm = dev_priv->gtt_mm;
	struct psb_gtt *pg = dev_priv->pg;
	uint32_t size, pages, offset_pages;
	void *kmem;
	struct drm_mm_node *node;
	struct page **page_list;
	struct psb_gtt_mem_mapping *mapping = NULL;
	int ret;

	ret = psb_get_meminfo_by_handle(hKernelMemInfo, &psKernelMemInfo);
	if (ret) {
		DRM_DEBUG("Cannot find kernelMemInfo handle %ld\n",
			  hKernelMemInfo);
		return -EINVAL;
	}

	DRM_DEBUG("Got psKernelMemInfo %p for handle %lx\n",
		  psKernelMemInfo, (u32)hKernelMemInfo);
	size = psKernelMemInfo->ui32AllocSize;
	kmem = psKernelMemInfo->pvLinAddrKM;
	pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;

	DRM_DEBUG("KerMemInfo size %ld, cpuVadr %lx, pages %ld, osMemHdl %lx\n",
		  size, kmem, pages, psKernelMemInfo->sMemBlk.hOSMemHandle);

	if (!kmem)
		DRM_DEBUG("kmem is NULL");

	/*get pages*/
	ret = psb_get_pages_by_mem_handle(psKernelMemInfo->sMemBlk.hOSMemHandle,
					  &page_list);
	if (ret) {
		DRM_DEBUG("get pages error\n");
		return ret;
	}

	DRM_DEBUG("get %ld pages\n", pages);

	/*alloc memory in TT apeture*/
	ret = psb_gtt_mm_alloc_mem(mm, pages, 0, &node);
	if (ret) {
		DRM_DEBUG("alloc TT memory error\n");
		goto failed_pages_alloc;
	}

	/*update psb_gtt_mm*/
	ret = psb_gtt_add_node(mm,
			       task_tgid_nr(current),
			       (u32)hKernelMemInfo,
			       node,
			       &mapping);
	if (ret) {
		DRM_DEBUG("add_node failed");
		goto failed_add_node;
	}

	node = mapping->node;
	offset_pages = node->start;

	DRM_DEBUG("get free node for %ld pages, offset %ld pages",
		  pages, offset_pages);

	/*update gtt*/
	psb_gtt_insert_pages(pg, page_list,
			     (unsigned)offset_pages,
			     (unsigned)pages,
			     0,
			     0,
			     0);

	*offset = offset_pages;
	return 0;

failed_add_node:
	psb_gtt_mm_free_mem(mm, node);
failed_pages_alloc:
	kfree(page_list);
	return ret;
#endif
}
900 int psb_gtt_unmap_meminfo(struct drm_device *dev, void * hKernelMemInfo)
902 struct drm_psb_private *dev_priv
903 = (struct drm_psb_private *)dev->dev_private;
904 struct psb_gtt_mm *mm = dev_priv->gtt_mm;
905 struct psb_gtt *pg = dev_priv->pg;
906 uint32_t pages, offset_pages;
907 struct drm_mm_node *node;
908 int ret;
910 ret = psb_gtt_remove_node(mm,
911 task_tgid_nr(current),
912 (u32)hKernelMemInfo,
913 &node);
914 if (ret) {
915 DRM_DEBUG("remove node failed\n");
916 return ret;
919 /*remove gtt entries*/
920 offset_pages = node->start;
921 pages = node->size;
923 psb_gtt_remove_pages(pg, offset_pages, pages, 0, 0, 1);
926 /*free tt node*/
928 psb_gtt_mm_free_mem(mm, node);
929 return 0;
932 int psb_gtt_map_meminfo_ioctl(struct drm_device *dev, void *data,
933 struct drm_file *file_priv)
935 struct psb_gtt_mapping_arg *arg
936 = (struct psb_gtt_mapping_arg *)data;
937 uint32_t *offset_pages = &arg->offset_pages;
939 DRM_DEBUG("\n");
941 return psb_gtt_map_meminfo(dev, arg->hKernelMemInfo, offset_pages);
944 int psb_gtt_unmap_meminfo_ioctl(struct drm_device *dev, void *data,
945 struct drm_file *file_priv)
948 struct psb_gtt_mapping_arg *arg
949 = (struct psb_gtt_mapping_arg *)data;
951 DRM_DEBUG("\n");
953 return psb_gtt_unmap_meminfo(dev, arg->hKernelMemInfo);
956 int psb_gtt_map_pvr_memory(struct drm_device *dev, unsigned int hHandle,
957 unsigned int ui32TaskId, dma_addr_t *pPages,
958 unsigned int ui32PagesNum, unsigned int *ui32Offset)
960 struct drm_psb_private *dev_priv = dev->dev_private;
961 struct psb_gtt_mm *mm = dev_priv->gtt_mm;
962 struct psb_gtt *pg = dev_priv->pg;
963 uint32_t size, pages, offset_pages;
964 struct drm_mm_node *node = NULL;
965 struct psb_gtt_mem_mapping *mapping = NULL;
966 int ret;
968 size = ui32PagesNum * PAGE_SIZE;
969 pages = 0;
971 /*alloc memory in TT apeture*/
972 ret = psb_gtt_mm_alloc_mem(mm, ui32PagesNum, 0, &node);
973 if (ret) {
974 DRM_DEBUG("alloc TT memory error\n");
975 goto failed_pages_alloc;
978 /*update psb_gtt_mm*/
979 ret = psb_gtt_add_node(mm,
980 (u32)ui32TaskId,
981 (u32)hHandle,
982 node,
983 &mapping);
984 if (ret) {
985 DRM_DEBUG("add_node failed");
986 goto failed_add_node;
989 node = mapping->node;
990 offset_pages = node->start;
992 DRM_DEBUG("get free node for %ld pages, offset %ld pages",
993 pages, offset_pages);
995 /*update gtt*/
996 psb_gtt_insert_phys_addresses(pg, pPages, (unsigned)offset_pages,
997 (unsigned)ui32PagesNum, 0);
999 *ui32Offset = offset_pages;
1000 return 0;
1002 failed_add_node:
1003 psb_gtt_mm_free_mem(mm, node);
1004 failed_pages_alloc:
1005 return ret;
1009 int psb_gtt_unmap_pvr_memory(struct drm_device *dev, unsigned int hHandle,
1010 unsigned int ui32TaskId)
1012 struct drm_psb_private *dev_priv = dev->dev_private;
1013 struct psb_gtt_mm *mm = dev_priv->gtt_mm;
1014 struct psb_gtt *pg = dev_priv->pg;
1015 uint32_t pages, offset_pages;
1016 struct drm_mm_node *node;
1017 int ret;
1019 ret = psb_gtt_remove_node(mm, (u32)ui32TaskId, (u32)hHandle, &node);
1020 if (ret) {
1021 printk(KERN_ERR "remove node failed\n");
1022 return ret;
1025 /*remove gtt entries*/
1026 offset_pages = node->start;
1027 pages = node->size;
1029 psb_gtt_remove_pages(pg, offset_pages, pages, 0, 0, 1);
1031 /*free tt node*/
1032 psb_gtt_mm_free_mem(mm, node);
1033 return 0;