/*
 * gma500: Add a gtt allocator
 * drivers/staging/gma500/psb_gtt.c
 * (blob a97e7beefc17957421a2cfc62d08582b8ade6d00)
 */
1 /*
2 * Copyright (c) 2007, Intel Corporation.
3 * All Rights Reserved.
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18 * Authors: Thomas Hellstrom <thomas-at-tungstengraphics.com>
21 #include <drm/drmP.h>
22 #include "psb_drv.h"
23 #include "psb_pvr_glue.h"
25 static inline uint32_t psb_gtt_mask_pte(uint32_t pfn, int type)
27 uint32_t mask = PSB_PTE_VALID;
29 if (type & PSB_MMU_CACHED_MEMORY)
30 mask |= PSB_PTE_CACHED;
31 if (type & PSB_MMU_RO_MEMORY)
32 mask |= PSB_PTE_RO;
33 if (type & PSB_MMU_WO_MEMORY)
34 mask |= PSB_PTE_WO;
36 return (pfn << PAGE_SHIFT) | mask;
39 struct psb_gtt *psb_gtt_alloc(struct drm_device *dev)
41 struct psb_gtt *tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
43 if (!tmp)
44 return NULL;
46 init_rwsem(&tmp->sem);
47 tmp->dev = dev;
49 return tmp;
52 void psb_gtt_takedown(struct psb_gtt *pg, int free)
54 struct drm_psb_private *dev_priv = pg->dev->dev_private;
56 if (!pg)
57 return;
59 if (pg->gtt_map) {
60 iounmap(pg->gtt_map);
61 pg->gtt_map = NULL;
63 if (pg->initialized) {
64 pci_write_config_word(pg->dev->pdev, PSB_GMCH_CTRL,
65 pg->gmch_ctrl);
66 PSB_WVDC32(pg->pge_ctl, PSB_PGETBL_CTL);
67 (void) PSB_RVDC32(PSB_PGETBL_CTL);
69 if (free)
70 kfree(pg);
73 int psb_gtt_init(struct psb_gtt *pg, int resume)
75 struct drm_device *dev = pg->dev;
76 struct drm_psb_private *dev_priv = dev->dev_private;
77 unsigned gtt_pages;
78 unsigned long stolen_size, vram_stolen_size;
79 unsigned i, num_pages;
80 unsigned pfn_base;
81 uint32_t vram_pages;
82 uint32_t tt_pages;
83 uint32_t *ttm_gtt_map;
84 uint32_t dvmt_mode = 0;
86 int ret = 0;
87 uint32_t pte;
89 pci_read_config_word(dev->pdev, PSB_GMCH_CTRL, &pg->gmch_ctrl);
90 pci_write_config_word(dev->pdev, PSB_GMCH_CTRL,
91 pg->gmch_ctrl | _PSB_GMCH_ENABLED);
93 pg->pge_ctl = PSB_RVDC32(PSB_PGETBL_CTL);
94 PSB_WVDC32(pg->pge_ctl | _PSB_PGETBL_ENABLED, PSB_PGETBL_CTL);
95 (void) PSB_RVDC32(PSB_PGETBL_CTL);
97 /* The root resource we allocate address space from */
98 dev_priv->gtt_mem = &dev->pdev->resource[PSB_GATT_RESOURCE];
100 pg->initialized = 1;
101 pg->gtt_phys_start = pg->pge_ctl & PAGE_MASK;
103 pg->gatt_start = pci_resource_start(dev->pdev, PSB_GATT_RESOURCE);
104 /* fix me: video mmu has hw bug to access 0x0D0000000,
105 * then make gatt start at 0x0e000,0000 */
106 pg->mmu_gatt_start = 0xE0000000;
107 pg->gtt_start = pci_resource_start(dev->pdev, PSB_GTT_RESOURCE);
108 gtt_pages =
109 pci_resource_len(dev->pdev, PSB_GTT_RESOURCE) >> PAGE_SHIFT;
110 pg->gatt_pages = pci_resource_len(dev->pdev, PSB_GATT_RESOURCE)
111 >> PAGE_SHIFT;
113 pci_read_config_dword(dev->pdev, PSB_BSM, &pg->stolen_base);
114 vram_stolen_size = pg->gtt_phys_start - pg->stolen_base - PAGE_SIZE;
116 stolen_size = vram_stolen_size;
118 printk(KERN_INFO"GMMADR(region 0) start: 0x%08x (%dM).\n",
119 pg->gatt_start, pg->gatt_pages/256);
120 printk(KERN_INFO"GTTADR(region 3) start: 0x%08x (can map %dM RAM), and actual RAM base 0x%08x.\n",
121 pg->gtt_start, gtt_pages * 4, pg->gtt_phys_start);
122 printk(KERN_INFO "Stolen memory information\n");
123 printk(KERN_INFO " base in RAM: 0x%x\n", pg->stolen_base);
124 printk(KERN_INFO " size: %luK, calculated by (GTT RAM base) - (Stolen base), seems wrong\n",
125 vram_stolen_size/1024);
126 dvmt_mode = (pg->gmch_ctrl >> 4) & 0x7;
127 printk(KERN_INFO " the correct size should be: %dM(dvmt mode=%d)\n",
128 (dvmt_mode == 1) ? 1 : (2 << (dvmt_mode - 1)), dvmt_mode);
130 if (resume && (gtt_pages != pg->gtt_pages) &&
131 (stolen_size != pg->stolen_size)) {
132 DRM_ERROR("GTT resume error.\n");
133 ret = -EINVAL;
134 goto out_err;
137 pg->gtt_pages = gtt_pages;
138 pg->stolen_size = stolen_size;
139 pg->vram_stolen_size = vram_stolen_size;
140 pg->gtt_map =
141 ioremap_nocache(pg->gtt_phys_start, gtt_pages << PAGE_SHIFT);
142 if (!pg->gtt_map) {
143 DRM_ERROR("Failure to map gtt.\n");
144 ret = -ENOMEM;
145 goto out_err;
148 pg->vram_addr = ioremap_wc(pg->stolen_base, stolen_size);
149 if (!pg->vram_addr) {
150 DRM_ERROR("Failure to map stolen base.\n");
151 ret = -ENOMEM;
152 goto out_err;
155 DRM_DEBUG("%s: vram kernel virtual address %p\n", pg->vram_addr);
157 tt_pages = (pg->gatt_pages < PSB_TT_PRIV0_PLIMIT) ?
158 (pg->gatt_pages) : PSB_TT_PRIV0_PLIMIT;
160 ttm_gtt_map = pg->gtt_map + tt_pages / 2;
163 * insert vram stolen pages.
166 pfn_base = pg->stolen_base >> PAGE_SHIFT;
167 vram_pages = num_pages = vram_stolen_size >> PAGE_SHIFT;
168 printk(KERN_INFO"Set up %d stolen pages starting at 0x%08x, GTT offset %dK\n",
169 num_pages, pfn_base, 0);
170 for (i = 0; i < num_pages; ++i) {
171 pte = psb_gtt_mask_pte(pfn_base + i, 0);
172 iowrite32(pte, pg->gtt_map + i);
176 * Init rest of gtt managed by IMG.
178 pfn_base = page_to_pfn(dev_priv->scratch_page);
179 pte = psb_gtt_mask_pte(pfn_base, 0);
180 for (; i < tt_pages / 2 - 1; ++i)
181 iowrite32(pte, pg->gtt_map + i);
184 * Init rest of gtt managed by TTM.
187 pfn_base = page_to_pfn(dev_priv->scratch_page);
188 pte = psb_gtt_mask_pte(pfn_base, 0);
189 PSB_DEBUG_INIT("Initializing the rest of a total "
190 "of %d gtt pages.\n", pg->gatt_pages);
192 for (; i < pg->gatt_pages - tt_pages / 2; ++i)
193 iowrite32(pte, ttm_gtt_map + i);
194 (void) ioread32(pg->gtt_map + i - 1);
196 return 0;
198 out_err:
199 psb_gtt_takedown(pg, 0);
200 return ret;
203 int psb_gtt_insert_pages(struct psb_gtt *pg, struct page **pages,
204 unsigned offset_pages, unsigned num_pages,
205 unsigned desired_tile_stride,
206 unsigned hw_tile_stride, int type)
208 unsigned rows = 1;
209 unsigned add;
210 unsigned row_add;
211 unsigned i;
212 unsigned j;
213 uint32_t *cur_page = NULL;
214 uint32_t pte;
216 if (hw_tile_stride)
217 rows = num_pages / desired_tile_stride;
218 else
219 desired_tile_stride = num_pages;
221 add = desired_tile_stride;
222 row_add = hw_tile_stride;
224 down_read(&pg->sem);
225 for (i = 0; i < rows; ++i) {
226 cur_page = pg->gtt_map + offset_pages;
227 for (j = 0; j < desired_tile_stride; ++j) {
228 pte =
229 psb_gtt_mask_pte(page_to_pfn(*pages++), type);
230 iowrite32(pte, cur_page++);
232 offset_pages += add;
234 (void) ioread32(cur_page - 1);
235 up_read(&pg->sem);
237 return 0;
240 int psb_gtt_insert_phys_addresses(struct psb_gtt *pg, dma_addr_t *pPhysFrames,
241 unsigned offset_pages, unsigned num_pages, int type)
243 unsigned j;
244 uint32_t *cur_page = NULL;
245 uint32_t pte;
246 u32 ba;
248 down_read(&pg->sem);
249 cur_page = pg->gtt_map + offset_pages;
250 for (j = 0; j < num_pages; ++j) {
251 ba = *pPhysFrames++;
252 pte = psb_gtt_mask_pte(ba >> PAGE_SHIFT, type);
253 iowrite32(pte, cur_page++);
255 (void) ioread32(cur_page - 1);
256 up_read(&pg->sem);
257 return 0;
260 int psb_gtt_remove_pages(struct psb_gtt *pg, unsigned offset_pages,
261 unsigned num_pages, unsigned desired_tile_stride,
262 unsigned hw_tile_stride, int rc_prot)
264 struct drm_psb_private *dev_priv = pg->dev->dev_private;
265 unsigned rows = 1;
266 unsigned add;
267 unsigned row_add;
268 unsigned i;
269 unsigned j;
270 uint32_t *cur_page = NULL;
271 unsigned pfn_base = page_to_pfn(dev_priv->scratch_page);
272 uint32_t pte = psb_gtt_mask_pte(pfn_base, 0);
274 if (hw_tile_stride)
275 rows = num_pages / desired_tile_stride;
276 else
277 desired_tile_stride = num_pages;
279 add = desired_tile_stride;
280 row_add = hw_tile_stride;
282 if (rc_prot)
283 down_read(&pg->sem);
284 for (i = 0; i < rows; ++i) {
285 cur_page = pg->gtt_map + offset_pages;
286 for (j = 0; j < desired_tile_stride; ++j)
287 iowrite32(pte, cur_page++);
289 offset_pages += add;
291 (void) ioread32(cur_page - 1);
292 if (rc_prot)
293 up_read(&pg->sem);
295 return 0;
298 int psb_gtt_mm_init(struct psb_gtt *pg)
300 struct psb_gtt_mm *gtt_mm;
301 struct drm_psb_private *dev_priv = pg->dev->dev_private;
302 struct drm_open_hash *ht;
303 struct drm_mm *mm;
304 int ret;
305 uint32_t tt_start;
306 uint32_t tt_size;
308 if (!pg || !pg->initialized) {
309 DRM_DEBUG("Invalid gtt struct\n");
310 return -EINVAL;
313 gtt_mm = kzalloc(sizeof(struct psb_gtt_mm), GFP_KERNEL);
314 if (!gtt_mm)
315 return -ENOMEM;
317 spin_lock_init(&gtt_mm->lock);
319 ht = &gtt_mm->hash;
320 ret = drm_ht_create(ht, 20);
321 if (ret) {
322 DRM_DEBUG("Create hash table failed(%d)\n", ret);
323 goto err_free;
326 tt_start = (pg->stolen_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
327 tt_start = (tt_start < pg->gatt_pages) ? tt_start : pg->gatt_pages;
328 tt_size = (pg->gatt_pages < PSB_TT_PRIV0_PLIMIT) ?
329 (pg->gatt_pages) : PSB_TT_PRIV0_PLIMIT;
331 mm = &gtt_mm->base;
333 /*will use tt_start ~ 128M for IMG TT buffers*/
334 ret = drm_mm_init(mm, tt_start, ((tt_size / 2) - tt_start));
335 if (ret) {
336 DRM_DEBUG("drm_mm_int error(%d)\n", ret);
337 goto err_mm_init;
340 gtt_mm->count = 0;
342 dev_priv->gtt_mm = gtt_mm;
344 DRM_INFO("PSB GTT mem manager ready, tt_start %ld, tt_size %ld pages\n",
345 (unsigned long)tt_start,
346 (unsigned long)((tt_size / 2) - tt_start));
347 return 0;
348 err_mm_init:
349 drm_ht_remove(ht);
351 err_free:
352 kfree(gtt_mm);
353 return ret;
/*
 * psb_gtt_mm_takedown - tear down the GTT memory manager
 *
 * Intended to delete all hash entries; currently a stub — nothing is
 * freed yet.
 */
void psb_gtt_mm_takedown(void)
{
}
364 static int psb_gtt_mm_get_ht_by_pid_locked(struct psb_gtt_mm *mm,
365 u32 tgid,
366 struct psb_gtt_hash_entry **hentry)
368 struct drm_hash_item *entry;
369 struct psb_gtt_hash_entry *psb_entry;
370 int ret;
372 ret = drm_ht_find_item(&mm->hash, tgid, &entry);
373 if (ret) {
374 DRM_DEBUG("Cannot find entry pid=%ld\n", tgid);
375 return ret;
378 psb_entry = container_of(entry, struct psb_gtt_hash_entry, item);
379 if (!psb_entry) {
380 DRM_DEBUG("Invalid entry");
381 return -EINVAL;
384 *hentry = psb_entry;
385 return 0;
389 static int psb_gtt_mm_insert_ht_locked(struct psb_gtt_mm *mm,
390 u32 tgid,
391 struct psb_gtt_hash_entry *hentry)
393 struct drm_hash_item *item;
394 int ret;
396 if (!hentry) {
397 DRM_DEBUG("Invalid parameters\n");
398 return -EINVAL;
401 item = &hentry->item;
402 item->key = tgid;
405 * NOTE: drm_ht_insert_item will perform such a check
406 ret = psb_gtt_mm_get_ht_by_pid(mm, tgid, &tmp);
407 if (!ret) {
408 DRM_DEBUG("Entry already exists for pid %ld\n", tgid);
409 return -EAGAIN;
413 /*Insert the given entry*/
414 ret = drm_ht_insert_item(&mm->hash, item);
415 if (ret) {
416 DRM_DEBUG("Insert failure\n");
417 return ret;
420 mm->count++;
422 return 0;
425 static int psb_gtt_mm_alloc_insert_ht(struct psb_gtt_mm *mm,
426 u32 tgid,
427 struct psb_gtt_hash_entry **entry)
429 struct psb_gtt_hash_entry *hentry;
430 int ret;
432 /*if the hentry for this tgid exists, just get it and return*/
433 spin_lock(&mm->lock);
434 ret = psb_gtt_mm_get_ht_by_pid_locked(mm, tgid, &hentry);
435 if (!ret) {
436 DRM_DEBUG("Entry for tgid %ld exist, hentry %p\n",
437 tgid, hentry);
438 *entry = hentry;
439 spin_unlock(&mm->lock);
440 return 0;
442 spin_unlock(&mm->lock);
444 DRM_DEBUG("Entry for tgid %ld doesn't exist, will create it\n", tgid);
446 hentry = kzalloc(sizeof(struct psb_gtt_hash_entry), GFP_KERNEL);
447 if (!hentry) {
448 DRM_DEBUG("Kmalloc failled\n");
449 return -ENOMEM;
452 ret = drm_ht_create(&hentry->ht, 20);
453 if (ret) {
454 DRM_DEBUG("Create hash table failed\n");
455 return ret;
458 spin_lock(&mm->lock);
459 ret = psb_gtt_mm_insert_ht_locked(mm, tgid, hentry);
460 spin_unlock(&mm->lock);
462 if (!ret)
463 *entry = hentry;
465 return ret;
468 static struct psb_gtt_hash_entry *
469 psb_gtt_mm_remove_ht_locked(struct psb_gtt_mm *mm, u32 tgid)
471 struct psb_gtt_hash_entry *tmp;
472 int ret;
474 ret = psb_gtt_mm_get_ht_by_pid_locked(mm, tgid, &tmp);
475 if (ret) {
476 DRM_DEBUG("Cannot find entry pid %ld\n", tgid);
477 return NULL;
480 /*remove it from ht*/
481 drm_ht_remove_item(&mm->hash, &tmp->item);
483 mm->count--;
485 return tmp;
488 static int psb_gtt_mm_remove_free_ht_locked(struct psb_gtt_mm *mm, u32 tgid)
490 struct psb_gtt_hash_entry *entry;
492 entry = psb_gtt_mm_remove_ht_locked(mm, tgid);
494 if (!entry) {
495 DRM_DEBUG("Invalid entry");
496 return -EINVAL;
499 /*delete ht*/
500 drm_ht_remove(&entry->ht);
502 /*free this entry*/
503 kfree(entry);
504 return 0;
507 static int
508 psb_gtt_mm_get_mem_mapping_locked(struct drm_open_hash *ht,
509 u32 key,
510 struct psb_gtt_mem_mapping **hentry)
512 struct drm_hash_item *entry;
513 struct psb_gtt_mem_mapping *mapping;
514 int ret;
516 ret = drm_ht_find_item(ht, key, &entry);
517 if (ret) {
518 DRM_DEBUG("Cannot find key %ld\n", key);
519 return ret;
522 mapping = container_of(entry, struct psb_gtt_mem_mapping, item);
523 if (!mapping) {
524 DRM_DEBUG("Invalid entry\n");
525 return -EINVAL;
528 *hentry = mapping;
529 return 0;
532 static int
533 psb_gtt_mm_insert_mem_mapping_locked(struct drm_open_hash *ht,
534 u32 key,
535 struct psb_gtt_mem_mapping *hentry)
537 struct drm_hash_item *item;
538 struct psb_gtt_hash_entry *entry;
539 int ret;
541 if (!hentry) {
542 DRM_DEBUG("hentry is NULL\n");
543 return -EINVAL;
546 item = &hentry->item;
547 item->key = key;
549 ret = drm_ht_insert_item(ht, item);
550 if (ret) {
551 DRM_DEBUG("insert_item failed\n");
552 return ret;
555 entry = container_of(ht, struct psb_gtt_hash_entry, ht);
556 if (entry)
557 entry->count++;
559 return 0;
562 static int
563 psb_gtt_mm_alloc_insert_mem_mapping(struct psb_gtt_mm *mm,
564 struct drm_open_hash *ht,
565 u32 key,
566 struct drm_mm_node *node,
567 struct psb_gtt_mem_mapping **entry)
569 struct psb_gtt_mem_mapping *mapping;
570 int ret;
572 if (!node || !ht) {
573 DRM_DEBUG("parameter error\n");
574 return -EINVAL;
577 /*try to get this mem_map */
578 spin_lock(&mm->lock);
579 ret = psb_gtt_mm_get_mem_mapping_locked(ht, key, &mapping);
580 if (!ret) {
581 DRM_DEBUG("mapping entry for key %ld exists, entry %p\n",
582 key, mapping);
583 *entry = mapping;
584 spin_unlock(&mm->lock);
585 return 0;
587 spin_unlock(&mm->lock);
589 DRM_DEBUG("Mapping entry for key %ld doesn't exist, will create it\n",
590 key);
592 mapping = kzalloc(sizeof(struct psb_gtt_mem_mapping), GFP_KERNEL);
593 if (!mapping) {
594 DRM_DEBUG("kmalloc failed\n");
595 return -ENOMEM;
598 mapping->node = node;
600 spin_lock(&mm->lock);
601 ret = psb_gtt_mm_insert_mem_mapping_locked(ht, key, mapping);
602 spin_unlock(&mm->lock);
604 if (!ret)
605 *entry = mapping;
607 return ret;
610 static struct psb_gtt_mem_mapping *
611 psb_gtt_mm_remove_mem_mapping_locked(struct drm_open_hash *ht, u32 key)
613 struct psb_gtt_mem_mapping *tmp;
614 struct psb_gtt_hash_entry *entry;
615 int ret;
617 ret = psb_gtt_mm_get_mem_mapping_locked(ht, key, &tmp);
618 if (ret) {
619 DRM_DEBUG("Cannot find key %ld\n", key);
620 return NULL;
623 drm_ht_remove_item(ht, &tmp->item);
625 entry = container_of(ht, struct psb_gtt_hash_entry, ht);
626 if (entry)
627 entry->count--;
629 return tmp;
632 static int psb_gtt_mm_remove_free_mem_mapping_locked(struct drm_open_hash *ht,
633 u32 key,
634 struct drm_mm_node **node)
636 struct psb_gtt_mem_mapping *entry;
638 entry = psb_gtt_mm_remove_mem_mapping_locked(ht, key);
639 if (!entry) {
640 DRM_DEBUG("entry is NULL\n");
641 return -EINVAL;
644 *node = entry->node;
646 kfree(entry);
647 return 0;
650 static int psb_gtt_add_node(struct psb_gtt_mm *mm,
651 u32 tgid,
652 u32 key,
653 struct drm_mm_node *node,
654 struct psb_gtt_mem_mapping **entry)
656 struct psb_gtt_hash_entry *hentry;
657 struct psb_gtt_mem_mapping *mapping;
658 int ret;
660 ret = psb_gtt_mm_alloc_insert_ht(mm, tgid, &hentry);
661 if (ret) {
662 DRM_DEBUG("alloc_insert failed\n");
663 return ret;
666 ret = psb_gtt_mm_alloc_insert_mem_mapping(mm,
667 &hentry->ht,
668 key,
669 node,
670 &mapping);
671 if (ret) {
672 DRM_DEBUG("mapping alloc_insert failed\n");
673 return ret;
676 *entry = mapping;
678 return 0;
681 static int psb_gtt_remove_node(struct psb_gtt_mm *mm,
682 u32 tgid,
683 u32 key,
684 struct drm_mm_node **node)
686 struct psb_gtt_hash_entry *hentry;
687 struct drm_mm_node *tmp;
688 int ret;
690 spin_lock(&mm->lock);
691 ret = psb_gtt_mm_get_ht_by_pid_locked(mm, tgid, &hentry);
692 if (ret) {
693 DRM_DEBUG("Cannot find entry for pid %ld\n", tgid);
694 spin_unlock(&mm->lock);
695 return ret;
697 spin_unlock(&mm->lock);
699 /*remove mapping entry*/
700 spin_lock(&mm->lock);
701 ret = psb_gtt_mm_remove_free_mem_mapping_locked(&hentry->ht,
702 key,
703 &tmp);
704 if (ret) {
705 DRM_DEBUG("remove_free failed\n");
706 spin_unlock(&mm->lock);
707 return ret;
710 *node = tmp;
712 /*check the count of mapping entry*/
713 if (!hentry->count) {
714 DRM_DEBUG("count of mapping entry is zero, tgid=%ld\n", tgid);
715 psb_gtt_mm_remove_free_ht_locked(mm, tgid);
718 spin_unlock(&mm->lock);
720 return 0;
723 static int psb_gtt_mm_alloc_mem(struct psb_gtt_mm *mm,
724 uint32_t pages,
725 uint32_t align,
726 struct drm_mm_node **node)
728 struct drm_mm_node *tmp_node;
729 int ret;
731 do {
732 ret = drm_mm_pre_get(&mm->base);
733 if (unlikely(ret)) {
734 DRM_DEBUG("drm_mm_pre_get error\n");
735 return ret;
738 spin_lock(&mm->lock);
739 tmp_node = drm_mm_search_free(&mm->base, pages, align, 1);
740 if (unlikely(!tmp_node)) {
741 DRM_DEBUG("No free node found\n");
742 spin_unlock(&mm->lock);
743 break;
746 tmp_node = drm_mm_get_block_atomic(tmp_node, pages, align);
747 spin_unlock(&mm->lock);
748 } while (!tmp_node);
750 if (!tmp_node) {
751 DRM_DEBUG("Node allocation failed\n");
752 return -ENOMEM;
755 *node = tmp_node;
756 return 0;
759 static void psb_gtt_mm_free_mem(struct psb_gtt_mm *mm, struct drm_mm_node *node)
761 spin_lock(&mm->lock);
762 drm_mm_put_block(node);
763 spin_unlock(&mm->lock);
766 int psb_gtt_map_meminfo(struct drm_device *dev,
767 void *hKernelMemInfo,
768 uint32_t *offset)
770 return -EINVAL;
771 /* FIXMEAC */
772 #if 0
773 struct drm_psb_private *dev_priv
774 = (struct drm_psb_private *)dev->dev_private;
775 void *psKernelMemInfo;
776 struct psb_gtt_mm *mm = dev_priv->gtt_mm;
777 struct psb_gtt *pg = dev_priv->pg;
778 uint32_t size, pages, offset_pages;
779 void *kmem;
780 struct drm_mm_node *node;
781 struct page **page_list;
782 struct psb_gtt_mem_mapping *mapping = NULL;
783 int ret;
785 ret = psb_get_meminfo_by_handle(hKernelMemInfo, &psKernelMemInfo);
786 if (ret) {
787 DRM_DEBUG("Cannot find kernelMemInfo handle %ld\n",
788 hKernelMemInfo);
789 return -EINVAL;
792 DRM_DEBUG("Got psKernelMemInfo %p for handle %lx\n",
793 psKernelMemInfo, (u32)hKernelMemInfo);
794 size = psKernelMemInfo->ui32AllocSize;
795 kmem = psKernelMemInfo->pvLinAddrKM;
796 pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
798 DRM_DEBUG("KerMemInfo size %ld, cpuVadr %lx, pages %ld, osMemHdl %lx\n",
799 size, kmem, pages, psKernelMemInfo->sMemBlk.hOSMemHandle);
801 if (!kmem)
802 DRM_DEBUG("kmem is NULL");
804 /*get pages*/
805 ret = psb_get_pages_by_mem_handle(psKernelMemInfo->sMemBlk.hOSMemHandle,
806 &page_list);
807 if (ret) {
808 DRM_DEBUG("get pages error\n");
809 return ret;
812 DRM_DEBUG("get %ld pages\n", pages);
814 /*alloc memory in TT apeture*/
815 ret = psb_gtt_mm_alloc_mem(mm, pages, 0, &node);
816 if (ret) {
817 DRM_DEBUG("alloc TT memory error\n");
818 goto failed_pages_alloc;
821 /*update psb_gtt_mm*/
822 ret = psb_gtt_add_node(mm,
823 task_tgid_nr(current),
824 (u32)hKernelMemInfo,
825 node,
826 &mapping);
827 if (ret) {
828 DRM_DEBUG("add_node failed");
829 goto failed_add_node;
832 node = mapping->node;
833 offset_pages = node->start;
835 DRM_DEBUG("get free node for %ld pages, offset %ld pages",
836 pages, offset_pages);
838 /*update gtt*/
839 psb_gtt_insert_pages(pg, page_list,
840 (unsigned)offset_pages,
841 (unsigned)pages,
846 *offset = offset_pages;
847 return 0;
849 failed_add_node:
850 psb_gtt_mm_free_mem(mm, node);
851 failed_pages_alloc:
852 kfree(page_list);
853 return ret;
854 #endif
857 int psb_gtt_unmap_meminfo(struct drm_device *dev, void * hKernelMemInfo)
859 struct drm_psb_private *dev_priv
860 = (struct drm_psb_private *)dev->dev_private;
861 struct psb_gtt_mm *mm = dev_priv->gtt_mm;
862 struct psb_gtt *pg = dev_priv->pg;
863 uint32_t pages, offset_pages;
864 struct drm_mm_node *node;
865 int ret;
867 ret = psb_gtt_remove_node(mm,
868 task_tgid_nr(current),
869 (u32)hKernelMemInfo,
870 &node);
871 if (ret) {
872 DRM_DEBUG("remove node failed\n");
873 return ret;
876 /*remove gtt entries*/
877 offset_pages = node->start;
878 pages = node->size;
880 psb_gtt_remove_pages(pg, offset_pages, pages, 0, 0, 1);
883 /*free tt node*/
885 psb_gtt_mm_free_mem(mm, node);
886 return 0;
890 * GTT resource allocator
894 * psb_gtt_alloc_handle - allocate a handle to a GTT map
895 * @dev: our DRM device
896 * @gt: Our GTT range
898 * Assign a handle to a gtt range object. For the moment we use a very
899 * simplistic interface.
901 int psb_gtt_alloc_handle(struct drm_device *dev, struct gtt_range *gt)
903 struct drm_psb_private *dev_priv = dev->dev_private;
904 int h;
906 mutex_lock(&dev_priv->gtt_mutex);
907 for (h = 0; h < GTT_MAX; h++) {
908 if (dev_priv->gtt_handles[h] == NULL) {
909 dev_priv->gtt_handles[h] = gt;
910 gt->handle = h;
911 kref_get(&gt->kref);
912 mutex_unlock(&dev_priv->gtt_mutex);
913 return h;
916 mutex_unlock(&dev_priv->gtt_mutex);
917 return -ENOSPC;
921 * psb_gtt_release_handle - release a handle to a GTT map
922 * @dev: our DRM device
923 * @gt: Our GTT range
925 * Remove the handle from a gtt range object
927 int psb_gtt_release_handle(struct drm_device *dev, struct gtt_range *gt)
929 struct drm_psb_private *dev_priv = dev->dev_private;
931 if (gt->handle < 0 || gt->handle >= GTT_MAX) {
932 gt->handle = -1;
933 WARN_ON(1);
934 return -EINVAL;
936 mutex_lock(&dev_priv->gtt_mutex);
937 dev_priv->gtt_handles[gt->handle] = NULL;
938 gt->handle = -1;
939 mutex_unlock(&dev_priv->gtt_mutex);
940 psb_gtt_kref_put(gt);
941 return 0;
945 * psb_gtt_lookup_handle - look up a GTT handle
946 * @dev: our DRM device
947 * @handle: our handle
949 * Look up a gtt handle and return the gtt or NULL. The object returned
950 * has a reference held so the caller must drop this when finished.
952 struct gtt_range *psb_gtt_lookup_handle(struct drm_device *dev, int handle)
954 struct drm_psb_private *dev_priv = dev->dev_private;
955 struct gtt_range *gt;
957 if (handle < 0 || handle > GTT_MAX)
958 return ERR_PTR(-EINVAL);
960 mutex_lock(&dev_priv->gtt_mutex);
961 gt = dev_priv->gtt_handles[handle];
962 kref_get(&gt->kref);
963 mutex_unlock(&dev_priv->gtt_mutex);
965 if (gt == NULL)
966 return ERR_PTR(-ENOENT);
967 return gt;
971 * psb_gtt_alloc_range - allocate GTT address space
972 * @dev: Our DRM device
973 * @len: length (bytes) of address space required
974 * @name: resource name
976 * Ask the kernel core to find us a suitable range of addresses
977 * to use for a GTT mapping.
979 * Returns a gtt_range structure describing the object, or NULL on
980 * error. On successful return the resource is both allocated and marked
981 * as in use.
983 struct gtt_range *psb_gtt_alloc_range(struct drm_device *dev, int len,
984 const char *name)
986 struct drm_psb_private *dev_priv = dev->dev_private;
987 struct gtt_range *gt;
988 struct resource *r = dev_priv->gtt_mem;
989 int ret;
991 gt = kzalloc(sizeof(struct gtt_range), GFP_KERNEL);
992 if (gt == NULL)
993 return NULL;
994 gt->handle = -1;
995 gt->resource.name = name;
996 kref_init(&gt->kref);
998 ret = allocate_resource(dev_priv->gtt_mem, &gt->resource,
999 len, 0, -1, /*r->start, r->end - 1, */
1000 PAGE_SIZE, NULL, NULL);
1001 if (ret == 0) {
1002 gt->offset = gt->resource.start - r->start;
1003 return gt;
1005 kfree(gt);
1006 return NULL;
1009 static void psb_gtt_destroy(struct kref *kref)
1011 struct gtt_range *gt = container_of(kref, struct gtt_range, kref);
1012 release_resource(&gt->resource);
1013 kfree(gt);
1017 * psb_gtt_kref_put - drop reference to a GTT object
1018 * @gt: the GT being dropped
1020 * Drop a reference to a psb gtt
1022 void psb_gtt_kref_put(struct gtt_range *gt)
1024 kref_put(&gt->kref, psb_gtt_destroy);
1028 * psb_gtt_free_range - release GTT address space
1029 * @dev: our DRM device
1030 * @gt: a mapping created with psb_gtt_alloc_range
1032 * Release a resource that was allocated with psb_gtt_alloc_range
1034 void psb_gtt_free_range(struct drm_device *dev, struct gtt_range *gt)
1036 if (gt->handle != -1)
1037 psb_gtt_release_handle(dev, gt);
1038 psb_gtt_kref_put(gt);