/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * $FreeBSD: head/sys/dev/drm2/ttm/ttm_memory.c 248663 2013-03-23 20:46:47Z dumbbell $
 **************************************************************************/

#define pr_fmt(fmt) "[TTM] " fmt

#include <drm/drmP.h>
#include <drm/ttm/ttm_memory.h>
#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_page_alloc.h>
#include <linux/export.h>

#define TTM_MEMORY_ALLOC_RETRIES 4
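
/*
 * Descriptive note on the per-zone accounting fields below, as used in
 * this file (all values in bytes):
 *   zone_mem   - total memory attributed to the zone.
 *   max_mem    - limit applied to unprivileged reservations.
 *   emer_mem   - emergency limit for privileged (PRIV_VM_MLOCK) callers.
 *   swap_limit - used_mem above this level schedules the shrink task.
 *   used_mem   - memory currently accounted against the zone.
 */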

struct ttm_mem_zone {
	u_int kobj_ref;
	struct ttm_mem_global *glob;
	const char *name;
	uint64_t zone_mem;
	uint64_t emer_mem;
	uint64_t max_mem;
	uint64_t swap_limit;
	uint64_t used_mem;
};

static void ttm_mem_zone_kobj_release(struct ttm_mem_zone *zone)
{

	pr_info("Zone %7s: Used memory at exit: %llu kiB\n",
		zone->name, (unsigned long long)zone->used_mem >> 10);
	kfree(zone);
}

#if 0
/* XXXKIB sysctl */
static ssize_t ttm_mem_zone_show(struct ttm_mem_zone *zone,
				 struct attribute *attr,
				 char *buffer)
{
	uint64_t val = 0;

	mtx_lock(&zone->glob->lock);
	if (attr == &ttm_mem_sys)
		val = zone->zone_mem;
	else if (attr == &ttm_mem_emer)
		val = zone->emer_mem;
	else if (attr == &ttm_mem_max)
		val = zone->max_mem;
	else if (attr == &ttm_mem_swap)
		val = zone->swap_limit;
	else if (attr == &ttm_mem_used)
		val = zone->used_mem;
	mtx_unlock(&zone->glob->lock);

	return snprintf(buffer, PAGE_SIZE, "%llu\n",
			(unsigned long long) val >> 10);
}
#endif

static void ttm_check_swapping(struct ttm_mem_global *glob);

#if 0
/* XXXKIB sysctl */
static ssize_t ttm_mem_zone_store(struct ttm_mem_zone *zone,
				  struct attribute *attr,
				  const char *buffer,
				  size_t size)
{
	int chars;
	unsigned long val;
	uint64_t val64;

	chars = sscanf(buffer, "%lu", &val);
	if (chars == 0)
		return size;

	val64 = val;
	val64 <<= 10;

	mtx_lock(&zone->glob->lock);
	if (val64 > zone->zone_mem)
		val64 = zone->zone_mem;
	if (attr == &ttm_mem_emer) {
		zone->emer_mem = val64;
		if (zone->max_mem > val64)
			zone->max_mem = val64;
	} else if (attr == &ttm_mem_max) {
		zone->max_mem = val64;
		if (zone->emer_mem < val64)
			zone->emer_mem = val64;
	} else if (attr == &ttm_mem_swap)
		zone->swap_limit = val64;
	mtx_unlock(&zone->glob->lock);

	ttm_check_swapping(zone->glob);

	return size;
}
#endif

static void ttm_mem_global_kobj_release(struct ttm_mem_global *glob)
{
}
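
/*
 * Check whether any zone is above its applicable memory target.  The
 * target depends on the caller: the swap work queue compares against
 * swap_limit, privileged (PRIV_VM_MLOCK) callers against emer_mem,
 * and everyone else against max_mem.  An "extra" request larger than
 * the target collapses the target to zero, so any zone with memory in
 * use then counts as above target.
 */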

static bool ttm_zones_above_swap_target(struct ttm_mem_global *glob,
					bool from_wq, uint64_t extra)
{
	unsigned int i;
	struct ttm_mem_zone *zone;
	uint64_t target;

	for (i = 0; i < glob->num_zones; ++i) {
		zone = glob->zones[i];

		if (from_wq)
			target = zone->swap_limit;
		else if (priv_check(curthread, PRIV_VM_MLOCK) == 0)
			target = zone->emer_mem;
		else
			target = zone->max_mem;

		target = (extra > target) ? 0ULL : target;

		if (zone->used_mem > target)
			return true;
	}
	return false;
}

/**
 * At this point we only support a single shrink callback.
 * Extend this if needed, perhaps using a linked list of callbacks.
 * Note that this function is reentrant:
 * many threads may try to swap out at any given time.
 */

static void ttm_shrink(struct ttm_mem_global *glob, bool from_wq,
		       uint64_t extra)
{
	int ret;
	struct ttm_mem_shrink *shrink;

	spin_lock(&glob->spin);
	if (glob->shrink == NULL)
		goto out;

	while (ttm_zones_above_swap_target(glob, from_wq, extra)) {
		shrink = glob->shrink;
		spin_unlock(&glob->spin);
		ret = shrink->do_shrink(shrink);
		spin_lock(&glob->spin);
		if (unlikely(ret != 0))
			goto out;
	}
out:
	spin_unlock(&glob->spin);
}

static void ttm_shrink_work(void *arg, int pending __unused)
{
	struct ttm_mem_global *glob = arg;

	ttm_shrink(glob, true, 0ULL);
}

static int ttm_mem_init_kernel_zone(struct ttm_mem_global *glob,
				    uint64_t mem)
{
	struct ttm_mem_zone *zone;

	zone = kzalloc(sizeof(*zone), GFP_KERNEL);

	zone->name = "kernel";
	zone->zone_mem = mem;
	zone->max_mem = mem >> 1;
	zone->emer_mem = (mem >> 1) + (mem >> 2);
	zone->swap_limit = zone->max_mem - (mem >> 3);
	zone->used_mem = 0;
	zone->glob = glob;
	glob->zone_kernel = zone;
	refcount_init(&zone->kobj_ref, 1);
	glob->zones[glob->num_zones++] = zone;
	return 0;
}
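
/*
 * Illustrative sizing example (the value is an assumption, not taken
 * from this file): with mem = 256 MiB of reserved contiguous memory,
 * the thresholds above become max_mem = 128 MiB (mem >> 1),
 * emer_mem = 192 MiB ((mem >> 1) + (mem >> 2)) and
 * swap_limit = 96 MiB (max_mem - (mem >> 3)).
 */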

static int ttm_mem_init_dma32_zone(struct ttm_mem_global *glob,
				   uint64_t mem)
{
	struct ttm_mem_zone *zone;

	zone = kzalloc(sizeof(*zone), GFP_KERNEL);

	/**
	 * No special dma32 zone needed.
	 */
	if ((physmem * PAGE_SIZE) <= ((uint64_t) 1ULL << 32)) {
		kfree(zone);
		return 0;
	}

	/*
	 * Limit max dma32 memory to 4GB for now
	 * until we can figure out how big this
	 * zone really is.
	 */
	if (mem > ((uint64_t) 1ULL << 32))
		mem = ((uint64_t) 1ULL << 32);

	zone->name = "dma32";
	zone->zone_mem = mem;
	zone->max_mem = mem >> 1;
	zone->emer_mem = (mem >> 1) + (mem >> 2);
	zone->swap_limit = zone->max_mem - (mem >> 3);
	zone->used_mem = 0;
	zone->glob = glob;
	glob->zone_dma32 = zone;
	refcount_init(&zone->kobj_ref, 1);
	glob->zones[glob->num_zones++] = zone;
	return 0;
}

int ttm_mem_global_init(struct ttm_mem_global *glob)
{
	u_int64_t mem;
	int ret;
	int i;
	struct ttm_mem_zone *zone;

	spin_init(&glob->spin, "ttmemglob");
	glob->swap_queue = taskqueue_create("ttm_swap", M_WAITOK,
	    taskqueue_thread_enqueue, &glob->swap_queue);
	taskqueue_start_threads(&glob->swap_queue, 1, TDPRI_KERN_DAEMON,
	    -1, "ttm swap");
	TASK_INIT(&glob->work, 0, ttm_shrink_work, glob);

	refcount_init(&glob->kobj_ref, 1);

	/*
	 * Managed contiguous memory for TTM. Only use kernel-reserved
	 * dma memory for TTM, which can be controlled via /boot/loader.conf
	 * (e.g. vm.dma_reserved=256m). This is the only truly dependable
	 * DMA memory.
	 */
	mem = (uint64_t)vm_contig_avail_pages() * PAGE_SIZE;

	ret = ttm_mem_init_kernel_zone(glob, mem);
	if (unlikely(ret != 0))
		goto out_no_zone;
	ret = ttm_mem_init_dma32_zone(glob, mem);
	if (unlikely(ret != 0))
		goto out_no_zone;
	pr_info("(struct ttm_mem_global *)%p\n", glob);
	for (i = 0; i < glob->num_zones; ++i) {
		zone = glob->zones[i];
		pr_info("Zone %7s: Available graphics memory: %llu kiB\n",
			zone->name, (unsigned long long)zone->max_mem >> 10);
	}
	ttm_page_alloc_init(glob, glob->zone_kernel->max_mem/(2*PAGE_SIZE));
	ttm_dma_page_alloc_init(glob, glob->zone_kernel->max_mem/(2*PAGE_SIZE));
	return 0;
out_no_zone:
	ttm_mem_global_release(glob);
	return ret;
}
EXPORT_SYMBOL(ttm_mem_global_init);
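
/*
 * Illustrative use only (a sketch, not part of this file): a driver
 * typically calls ttm_mem_global_init() once on its ttm_mem_global
 * object during setup and pairs it with ttm_mem_global_release() on
 * teardown, e.g.:
 *
 *	struct ttm_mem_global *glob = ...;	(driver-owned storage)
 *	int ret = ttm_mem_global_init(glob);
 *	if (ret != 0)
 *		return ret;
 *	...
 *	ttm_mem_global_release(glob);
 */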

void ttm_mem_global_release(struct ttm_mem_global *glob)
{
	unsigned int i;
	struct ttm_mem_zone *zone;

	/* let the page allocator first stop the shrink work. */
	ttm_page_alloc_fini();
	ttm_dma_page_alloc_fini();

	taskqueue_drain(glob->swap_queue, &glob->work);
	taskqueue_free(glob->swap_queue);
	glob->swap_queue = NULL;
	for (i = 0; i < glob->num_zones; ++i) {
		zone = glob->zones[i];
		if (refcount_release(&zone->kobj_ref))
			ttm_mem_zone_kobj_release(zone);
	}
	if (refcount_release(&glob->kobj_ref))
		ttm_mem_global_kobj_release(glob);
}
EXPORT_SYMBOL(ttm_mem_global_release);

static void ttm_check_swapping(struct ttm_mem_global *glob)
{
	bool needs_swapping = false;
	unsigned int i;
	struct ttm_mem_zone *zone;

	spin_lock(&glob->spin);
	for (i = 0; i < glob->num_zones; ++i) {
		zone = glob->zones[i];
		if (zone->used_mem > zone->swap_limit) {
			needs_swapping = true;
			break;
		}
	}
	spin_unlock(&glob->spin);

	if (unlikely(needs_swapping))
		taskqueue_enqueue(glob->swap_queue, &glob->work);
}

static void ttm_mem_global_free_zone(struct ttm_mem_global *glob,
				     struct ttm_mem_zone *single_zone,
				     uint64_t amount)
{
	unsigned int i;
	struct ttm_mem_zone *zone;

	spin_lock(&glob->spin);
	for (i = 0; i < glob->num_zones; ++i) {
		zone = glob->zones[i];
		if (single_zone && zone != single_zone)
			continue;
		zone->used_mem -= amount;
	}
	spin_unlock(&glob->spin);
}

void ttm_mem_global_free(struct ttm_mem_global *glob,
			 uint64_t amount)
{
	ttm_mem_global_free_zone(glob, NULL, amount);
}
EXPORT_SYMBOL(ttm_mem_global_free);

static int ttm_mem_global_reserve(struct ttm_mem_global *glob,
				  struct ttm_mem_zone *single_zone,
				  uint64_t amount, bool reserve)
{
	uint64_t limit;
	int ret = -ENOMEM;
	unsigned int i;
	struct ttm_mem_zone *zone;

	spin_lock(&glob->spin);
	for (i = 0; i < glob->num_zones; ++i) {
		zone = glob->zones[i];
		if (single_zone && zone != single_zone)
			continue;

		limit = (priv_check(curthread, PRIV_VM_MLOCK) == 0) ?
			zone->emer_mem : zone->max_mem;

		if (zone->used_mem > limit)
			goto out_unlock;
	}

	if (reserve) {
		for (i = 0; i < glob->num_zones; ++i) {
			zone = glob->zones[i];
			if (single_zone && zone != single_zone)
				continue;
			zone->used_mem += amount;
		}
	}

	ret = 0;
out_unlock:
	spin_unlock(&glob->spin);
	ttm_check_swapping(glob);

	return ret;
}
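
/*
 * Design note: the reservation above is all-or-nothing.  Every
 * applicable zone is checked against its limit under a single lock
 * hold, and used_mem is only incremented (when "reserve" is true)
 * after all of them pass.  ttm_check_swapping() runs after the lock
 * is dropped and may schedule the shrink task.
 */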

static int ttm_mem_global_alloc_zone(struct ttm_mem_global *glob,
				     struct ttm_mem_zone *single_zone,
				     uint64_t memory,
				     bool no_wait, bool interruptible)
{
	int count = TTM_MEMORY_ALLOC_RETRIES;

	while (unlikely(ttm_mem_global_reserve(glob,
					       single_zone,
					       memory, true)
			!= 0)) {
		if (no_wait)
			return -ENOMEM;
		if (unlikely(count-- == 0))
			return -ENOMEM;
		ttm_shrink(glob, false, memory + (memory >> 2) + 16);
	}

	return 0;
}
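
/*
 * On a failed reservation the loop above retries up to
 * TTM_MEMORY_ALLOC_RETRIES times, asking ttm_shrink() between
 * attempts to free the requested amount plus 25% plus 16 bytes of
 * slack.  The "interruptible" argument is accepted but not used on
 * this path in the code as written.
 */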

int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory,
			 bool no_wait, bool interruptible)
{
	/**
	 * Normal allocations of kernel memory are registered in
	 * all zones.
	 */

	return ttm_mem_global_alloc_zone(glob, NULL, memory, no_wait,
					 interruptible);
}
EXPORT_SYMBOL(ttm_mem_global_alloc);
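
/*
 * Illustrative use only (a sketch, not part of this file): a caller
 * accounts "size" bytes before backing an object and releases the
 * same amount when done, e.g.:
 *
 *	ret = ttm_mem_global_alloc(glob, size, false, true);
 *	if (ret != 0)
 *		return ret;
 *	...
 *	ttm_mem_global_free(glob, size);
 */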

int ttm_mem_global_alloc_page(struct ttm_mem_global *glob,
			      struct page *page,
			      bool no_wait, bool interruptible)
{
	struct ttm_mem_zone *zone = NULL;

	/**
	 * Page allocations may be registered in a single zone
	 * only if highmem or !dma32.
	 */

	if (glob->zone_dma32 && page_to_pfn(page) > 0x00100000UL)
		zone = glob->zone_kernel;
	return ttm_mem_global_alloc_zone(glob, zone, PAGE_SIZE, no_wait,
					 interruptible);
}

void ttm_mem_global_free_page(struct ttm_mem_global *glob, struct page *page)
{
	struct ttm_mem_zone *zone = NULL;

	if (glob->zone_dma32 && page_to_pfn(page) > 0x00100000UL)
		zone = glob->zone_kernel;
	ttm_mem_global_free_zone(glob, zone, PAGE_SIZE);
}
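
/*
 * The pfn test in the two page functions above selects the accounting
 * zone: with 4 KiB pages, pfn 0x00100000 marks the 4 GiB boundary, so
 * pages above it are charged to the kernel zone only, while pages
 * below it (or systems without a dma32 zone) are charged to all
 * zones.
 */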

size_t ttm_round_pot(size_t size)
{
	if ((size & (size - 1)) == 0)
		return size;
	else if (size > PAGE_SIZE)
		return PAGE_ALIGN(size);
	else {
		size_t tmp_size = 4;

		while (tmp_size < size)
			tmp_size <<= 1;

		return tmp_size;
	}
	return 0;
}
EXPORT_SYMBOL(ttm_round_pot);
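
/*
 * Worked examples for ttm_round_pot(), assuming PAGE_SIZE == 4096:
 * ttm_round_pot(100) == 128, ttm_round_pot(4096) == 4096 (already a
 * power of two), and ttm_round_pot(5000) == 8192 via PAGE_ALIGN().
 * Note that a size of 0 also satisfies the power-of-two test and is
 * returned unchanged.
 */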