/*
 *  linux/mm/nommu.c
 *
 *  Replacement code for mm functions to support CPU's that don't
 *  have any form of memory management unit (thus no virtual memory).
 *
 *  Copyright (c) 2000-2003 David McCullough <davidm@snapgear.com>
 *  Copyright (c) 2000-2001 D Jeff Dionne <jeff@uClinux.org>
 *  Copyright (c) 2002      Greg Ungerer <gerg@snapgear.com>
 */
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/smp_lock.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/syscalls.h>

#include <asm/uaccess.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
void *high_memory;
struct page *mem_map;
unsigned long max_mapnr;
unsigned long num_physpages;
unsigned long askedalloc, realalloc;
atomic_t vm_committed_space = ATOMIC_INIT(0);
int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
int sysctl_overcommit_ratio = 50; /* default is 50% */

int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
EXPORT_SYMBOL(sysctl_max_map_count);
/*
 * Handle all mappings that got truncated by a "truncate()"
 * system call.
 *
 * NOTE! We have to be ready to update the memory sharing
 * between the file and the memory map for a potential last
 * incomplete page.  Ugly, but necessary.
 */
int vmtruncate(struct inode *inode, loff_t offset)
{
	struct address_space *mapping = inode->i_mapping;
	unsigned long limit;

	if (inode->i_size < offset)
		goto do_expand;
	i_size_write(inode, offset);

	truncate_inode_pages(mapping, offset);
	goto out_truncate;

do_expand:
	limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
	if (limit != RLIM_INFINITY && offset > limit)
		goto out_sig;
	if (offset > inode->i_sb->s_maxbytes)
		goto out;
	i_size_write(inode, offset);

out_truncate:
	if (inode->i_op && inode->i_op->truncate)
		inode->i_op->truncate(inode);
	return 0;
out_sig:
	send_sig(SIGXFSZ, current, 0);
out:
	return -EFBIG;
}
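
/*
 * Example (illustrative sketch, not part of the original file): the
 * typical caller is a filesystem's setattr path on a size change,
 * roughly:
 *
 *	if (attr->ia_valid & ATTR_SIZE)
 *		error = vmtruncate(inode, attr->ia_size);
 *
 * Shrinking drops the now-stale pagecache pages before invoking the
 * filesystem's own ->truncate(); expanding only checks RLIMIT_FSIZE
 * and sb->s_maxbytes before growing i_size.
 */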
/*
 * Return the total memory allocated for this pointer, not
 * just what the caller asked for.
 *
 * Doesn't have to be accurate, i.e. may have races.
 */
unsigned int kobjsize(const void *objp)
{
	struct page *page;

	if (!objp || !((page = virt_to_page(objp))))
		return 0;

	if (PageSlab(page))
		return ksize(objp);

	BUG_ON(page->index < 0);
	BUG_ON(page->index >= MAX_ORDER);

	return (PAGE_SIZE << page->index);
}
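
/*
 * Example (sketch): for a slab object this reports the slab's rounded
 * object size rather than the requested length, e.g.
 *
 *	void *p = kmalloc(100, GFP_KERNEL);
 *	unsigned int n = kobjsize(p);	(n == ksize(p), e.g. 128, not 100)
 *
 * which is why realalloc below tracks kobjsize() of each allocation
 * while askedalloc tracks the length the caller asked for.
 */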
/*
 * The nommu dodgy version :-)
 */
int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
	unsigned long start, int len, int write, int force,
	struct page **pages, struct vm_area_struct **vmas)
{
	int i;
	static struct vm_area_struct dummy_vma;

	for (i = 0; i < len; i++) {
		if (pages) {
			pages[i] = virt_to_page(start);
			if (pages[i])
				page_cache_get(pages[i]);
		}
		if (vmas)
			vmas[i] = &dummy_vma;
		start += PAGE_SIZE;
	}
	return(i);
}
rwlock_t vmlist_lock = RW_LOCK_UNLOCKED;
struct vm_struct *vmlist;

void vfree(void *addr)
{
	kfree(addr);
}

void *__vmalloc(unsigned long size, int gfp_mask, pgprot_t prot)
{
	/*
	 * kmalloc doesn't like __GFP_HIGHMEM for some reason
	 */
	return kmalloc(size, gfp_mask & ~__GFP_HIGHMEM);
}

struct page * vmalloc_to_page(void *addr)
{
	return virt_to_page(addr);
}
long vread(char *buf, char *addr, unsigned long count)
{
	memcpy(buf, addr, count);
	return count;
}

long vwrite(char *buf, char *addr, unsigned long count)
{
	/* Don't allow overflow */
	if ((unsigned long) addr + count < count)
		count = -(unsigned long) addr;

	memcpy(addr, buf, count);
	return(count);
}
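
/*
 * Worked example for the overflow clamp above: on a 32-bit box with
 * addr == 0xfffff000 and count == 0x2000, addr + count wraps to
 * 0x1000 < count, so count is rewritten to -(unsigned long)addr ==
 * 0x1000 - exactly the bytes remaining below the top of the address
 * space.
 */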
/*
 *	vmalloc  -  allocate virtually contiguous memory
 *
 *	@size:		allocation size
 *
 *	Allocate enough pages to cover @size from the page level
 *	allocator and map them into contiguous kernel virtual space.
 *
 *	For tight control over page level allocator and protection flags
 *	use __vmalloc() instead.
 */
void *vmalloc(unsigned long size)
{
	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL);
}

/*
 *	vmalloc_32  -  allocate virtually contiguous memory (32bit addressable)
 *
 *	@size:		allocation size
 *
 *	Allocate enough 32bit PA addressable pages to cover @size from the
 *	page level allocator and map them into contiguous kernel virtual space.
 */
void *vmalloc_32(unsigned long size)
{
	return __vmalloc(size, GFP_KERNEL, PAGE_KERNEL);
}
void *vmap(struct page **pages, unsigned int count, unsigned long flags, pgprot_t prot)
{
	BUG();
	return NULL;
}

void vunmap(void *addr)
{
	BUG();
}
/*
 *  sys_brk() for the most part doesn't need the global kernel
 *  lock, except when an application is doing something nasty
 *  like trying to un-brk an area that has already been mapped
 *  to a regular file.  In this case, the unmapping will need
 *  to invoke file system routines that need the global lock.
 */
asmlinkage unsigned long sys_brk(unsigned long brk)
{
	struct mm_struct *mm = current->mm;

	if (brk < mm->end_code || brk < mm->start_brk || brk > mm->context.end_brk)
		return mm->brk;

	if (mm->brk == brk)
		return mm->brk;

	/*
	 * Always allow shrinking brk
	 */
	if (brk <= mm->brk) {
		mm->brk = brk;
		return brk;
	}

	/*
	 * Ok, looks good - let it rip.
	 */
	return mm->brk = brk;
}
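
/*
 * Example (sketch, assuming a task whose brk arena spans
 * mm->start_brk..mm->context.end_brk): shrinking always succeeds,
 * growing succeeds only inside the preallocated arena, and an
 * out-of-range request just reports the current break:
 *
 *	cur = sys_brk(0);		(0 is out of range: returns mm->brk)
 *	sys_brk(cur + 4096);		(moves brk iff still within end_brk)
 */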
/*
 * Combine the mmap "prot" and "flags" argument into one "vm_flags" used
 * internally.  Essentially, translate the "PROT_xxx" and "MAP_xxx" bits
 * into "VM_xxx".
 */
static inline unsigned long calc_vm_flags(unsigned long prot, unsigned long flags)
{
#define _trans(x,bit1,bit2) \
((bit1==bit2)?(x&bit1):(x&bit1)?bit2:0)

	unsigned long prot_bits, flag_bits;
	prot_bits =
		_trans(prot, PROT_READ, VM_READ) |
		_trans(prot, PROT_WRITE, VM_WRITE) |
		_trans(prot, PROT_EXEC, VM_EXEC);
	flag_bits =
		_trans(flags, MAP_GROWSDOWN, VM_GROWSDOWN) |
		_trans(flags, MAP_DENYWRITE, VM_DENYWRITE) |
		_trans(flags, MAP_EXECUTABLE, VM_EXECUTABLE);
	return prot_bits | flag_bits;
#undef _trans
}
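
/*
 * Worked example: calc_vm_flags(PROT_READ | PROT_WRITE, MAP_GROWSDOWN)
 * expands via _trans() to VM_READ | VM_WRITE | VM_GROWSDOWN - each set
 * PROT_/MAP_ bit is translated to its VM_ counterpart, or passed
 * through unchanged when the two constants happen to be equal.
 */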
#ifdef DEBUG
static void show_process_blocks(void)
{
	struct mm_tblock_struct *tblock;

	printk("Process blocks %d:", current->pid);

	for (tblock = &current->mm->context.tblock; tblock; tblock = tblock->next) {
		printk(" %p: %p", tblock, tblock->rblock);
		if (tblock->rblock)
			printk(" (%d @%p #%d)", kobjsize(tblock->rblock->kblock), tblock->rblock->kblock, tblock->rblock->refcount);
		printk(tblock->next ? " ->" : ".\n");
	}
}
#endif /* DEBUG */
unsigned long do_mmap_pgoff(
	struct file * file,
	unsigned long addr,
	unsigned long len,
	unsigned long prot,
	unsigned long flags,
	unsigned long pgoff)
{
	void * result;
	struct mm_tblock_struct * tblock;
	unsigned int vm_flags;

	/*
	 * Get the !CONFIG_MMU specific checks done first
	 */
	if ((flags & MAP_SHARED) && (prot & PROT_WRITE) && (file)) {
		printk("MAP_SHARED not supported (cannot write mappings to disk)\n");
		return -EINVAL;
	}

	if ((prot & PROT_WRITE) && (flags & MAP_PRIVATE)) {
		printk("Private writable mappings not supported\n");
		return -EINVAL;
	}

	/*
	 * now all the standard checks
	 */
	if (file && (!file->f_op || !file->f_op->mmap))
		return -ENODEV;

	if (PAGE_ALIGN(len) == 0)
		return addr;

	if (len > TASK_SIZE)
		return -EINVAL;

	/* offset overflow? */
	if ((pgoff + (len >> PAGE_SHIFT)) < pgoff)
		return -EINVAL;

	/* Do simple checking here so the lower-level routines won't have
	 * to.  We assume access permissions have been handled by the open
	 * of the memory object, so we don't do any here.
	 */
	vm_flags = calc_vm_flags(prot,flags) /* | mm->def_flags */ | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;

	/*
	 * determine the object being mapped and call the appropriate
	 * specific mapper.
	 */
	if (file) {
		struct vm_area_struct vma;
		int error;

		if (!file->f_op)
			return -ENODEV;

		vma.vm_start = addr;
		vma.vm_end = addr + len;
		vma.vm_flags = vm_flags;
		vma.vm_pgoff = pgoff;

#ifdef MAGIC_ROM_PTR
		/* First, try simpler routine designed to give us a ROM pointer. */

		if (file->f_op->romptr && !(prot & PROT_WRITE)) {
			error = file->f_op->romptr(file, &vma);
#ifdef DEBUG
			printk("romptr mmap returned %d, start 0x%.8x\n", error,
					vma.vm_start);
#endif
			if (!error)
				return vma.vm_start;
			else if (error != -ENOSYS)
				return error;
		} else
#endif /* MAGIC_ROM_PTR */
		/* Then try full mmap routine, which might return a RAM pointer,
		   or do something truly complicated. */

		if (file->f_op->mmap) {
			error = file->f_op->mmap(file, &vma);

#ifdef DEBUG
			printk("f_op->mmap() returned %d/%lx\n", error, vma.vm_start);
#endif
			if (!error)
				return vma.vm_start;
			else if (error != -ENOSYS)
				return error;
		} else
			return -ENODEV; /* No mapping operations defined */

		/* An ENOSYS error indicates that mmap isn't possible (as opposed to
		   tried but failed) so we'll fall through to the copy. */
	}

	tblock = (struct mm_tblock_struct *)
		kmalloc(sizeof(struct mm_tblock_struct), GFP_KERNEL);
	if (!tblock) {
		printk("Allocation of tblock for %lu byte allocation from process %d failed\n", len, current->pid);
		show_free_areas();
		return -ENOMEM;
	}

	tblock->rblock = (struct mm_rblock_struct *)
		kmalloc(sizeof(struct mm_rblock_struct), GFP_KERNEL);

	if (!tblock->rblock) {
		printk("Allocation of rblock for %lu byte allocation from process %d failed\n", len, current->pid);
		show_free_areas();
		kfree(tblock);
		return -ENOMEM;
	}

	result = kmalloc(len, GFP_KERNEL);
	if (!result) {
		printk("Allocation of length %lu from process %d failed\n", len,
				current->pid);
		show_free_areas();
		kfree(tblock->rblock);
		kfree(tblock);
		return -ENOMEM;
	}

	tblock->rblock->refcount = 1;
	tblock->rblock->kblock = result;
	tblock->rblock->size = len;

	realalloc += kobjsize(result);
	askedalloc += len;

#ifdef WARN_ON_SLACK
	if ((len+WARN_ON_SLACK) <= kobjsize(result))
		printk("Allocation of %lu bytes from process %d has %lu bytes of slack\n", len, current->pid, kobjsize(result)-len);
#endif

	if (file) {
		int error;
		mm_segment_t old_fs = get_fs();
		set_fs(KERNEL_DS);
		error = file->f_op->read(file, (char *) result, len, &file->f_pos);
		set_fs(old_fs);
		if (error < 0) {
			kfree(result);
			kfree(tblock->rblock);
			kfree(tblock);
			return error;
		}
		if (error < len)
			memset(result+error, '\0', len-error);
	} else {
		memset(result, '\0', len);
	}

	realalloc += kobjsize(tblock);
	askedalloc += sizeof(struct mm_tblock_struct);

	realalloc += kobjsize(tblock->rblock);
	askedalloc += sizeof(struct mm_rblock_struct);

	tblock->next = current->mm->context.tblock.next;
	current->mm->context.tblock.next = tblock;

#ifdef DEBUG
	printk("do_mmap:\n");
	show_process_blocks();
#endif

	return (unsigned long)result;
}
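
/*
 * Example (sketch): the only file mappings this path accepts are
 * read-only ones, and unless the driver's romptr/mmap hook satisfies
 * the request directly, the caller gets back a plain pointer to a
 * private kmalloc'd copy of the file contents:
 *
 *	addr = do_mmap_pgoff(file, 0, len, PROT_READ, MAP_PRIVATE, 0);
 *	(addr is the kmalloc'd buffer; unmapping it kfree()s the copy)
 */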
int do_munmap(struct mm_struct * mm, unsigned long addr, size_t len)
{
	struct mm_tblock_struct * tblock, *tmp;

#ifdef MAGIC_ROM_PTR
	/*
	 * For efficiency's sake, if the pointer is obviously in ROM,
	 * don't bother walking the lists to free it.
	 */
	if (is_in_rom(addr))
		return 0;
#endif

#ifdef DEBUG
	printk("do_munmap:\n");
#endif

	tmp = &mm->context.tblock; /* dummy head */
	while ((tblock=tmp->next) && tblock->rblock &&
			tblock->rblock->kblock != (void*)addr)
		tmp = tblock;

	if (!tblock) {
		printk("munmap of non-mmaped memory by process %d (%s): %p\n",
			current->pid, current->comm, (void*)addr);
		return -EINVAL;
	}
	if (tblock->rblock) {
		if (!--tblock->rblock->refcount) {
			if (tblock->rblock->kblock) {
				realalloc -= kobjsize(tblock->rblock->kblock);
				askedalloc -= tblock->rblock->size;
				kfree(tblock->rblock->kblock);
			}

			realalloc -= kobjsize(tblock->rblock);
			askedalloc -= sizeof(struct mm_rblock_struct);
			kfree(tblock->rblock);
		}
	}

	tmp->next = tblock->next;
	realalloc -= kobjsize(tblock);
	askedalloc -= sizeof(struct mm_tblock_struct);
	kfree(tblock);

#ifdef DEBUG
	show_process_blocks();
#endif

	return 0;
}
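
/*
 * Example (sketch): the list walk above matches on the exact kblock
 * pointer, so only an address handed out by do_mmap_pgoff() can be
 * unmapped and len is effectively ignored - partial unmaps fail:
 *
 *	addr = do_mmap_pgoff(NULL, 0, 8192, PROT_READ, MAP_PRIVATE, 0);
 *	do_munmap(mm, addr, 8192);		(frees the whole block)
 *	do_munmap(mm, addr + 4096, 4096);	(-EINVAL: no such block)
 */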
/* Release all mmaps. */
void exit_mmap(struct mm_struct * mm)
{
	struct mm_tblock_struct *tmp;

	if (!mm)
		return;

#ifdef DEBUG
	printk("Exit_mmap:\n");
#endif

	while((tmp = mm->context.tblock.next)) {
		if (tmp->rblock) {
			if (!--tmp->rblock->refcount) {
				if (tmp->rblock->kblock) {
					realalloc -= kobjsize(tmp->rblock->kblock);
					askedalloc -= tmp->rblock->size;
					kfree(tmp->rblock->kblock);
				}
				realalloc -= kobjsize(tmp->rblock);
				askedalloc -= sizeof(struct mm_rblock_struct);
				kfree(tmp->rblock);
			}
			tmp->rblock = 0;
		}
		mm->context.tblock.next = tmp->next;
		realalloc -= kobjsize(tmp);
		askedalloc -= sizeof(struct mm_tblock_struct);
		kfree(tmp);
	}

#ifdef DEBUG
	show_process_blocks();
#endif
}
asmlinkage long sys_munmap(unsigned long addr, size_t len)
{
	int ret;
	struct mm_struct *mm = current->mm;

	down_write(&mm->mmap_sem);
	ret = do_munmap(mm, addr, len);
	up_write(&mm->mmap_sem);
	return ret;
}
unsigned long do_brk(unsigned long addr, unsigned long len)
{
	return -ENOMEM;
}

struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long addr)
{
	return NULL;
}

struct page * follow_page(struct mm_struct *mm, unsigned long addr, int write)
{
	return NULL;
}

struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
{
	return NULL;
}

int remap_page_range(struct vm_area_struct *vma, unsigned long from,
		unsigned long to, unsigned long size, pgprot_t prot)
{
	return -EPERM;
}

unsigned long get_unmapped_area(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	return -ENOMEM;
}

void swap_unplug_io_fn(struct backing_dev_info *bdi, struct page *page)
{
}