MOXA linux-2.6.x / linux-2.6.9-uc0 from sdlinux-moxaart.tgz
[linux-2.6.9-moxart.git] / mm / nommu.c
blob: 8a56d5c1e172576f506613b37393f414caa8f59d
/*
 *  linux/mm/nommu.c
 *
 *  Replacement code for mm functions to support CPUs that don't
 *  have any form of memory management unit (thus no virtual memory).
 *
 *  Copyright (c) 2000-2003 David McCullough <davidm@snapgear.com>
 *  Copyright (c) 2000-2001 D Jeff Dionne <jeff@uClinux.org>
 *  Copyright (c) 2002      Greg Ungerer <gerg@snapgear.com>
 */
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/smp_lock.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>

#include <asm/uaccess.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
void *high_memory;
struct page *mem_map;
unsigned long max_mapnr;
unsigned long num_physpages;
unsigned long askedalloc, realalloc;
atomic_t vm_committed_space = ATOMIC_INIT(0);
int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
int sysctl_overcommit_ratio = 50; /* default is 50% */
int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;

EXPORT_SYMBOL(sysctl_max_map_count);
EXPORT_SYMBOL(mem_map);

EXPORT_SYMBOL(vmalloc);
EXPORT_SYMBOL(vfree);
EXPORT_SYMBOL(vmalloc_to_page);
EXPORT_SYMBOL(vmalloc_32);
/*
 * Handle all mappings that got truncated by a "truncate()"
 * system call.
 *
 * NOTE! We have to be ready to update the memory sharing
 * between the file and the memory map for a potential last
 * incomplete page.  Ugly, but necessary.
 */
int vmtruncate(struct inode *inode, loff_t offset)
{
	struct address_space *mapping = inode->i_mapping;
	unsigned long limit;

	if (inode->i_size < offset)
		goto do_expand;
	i_size_write(inode, offset);

	truncate_inode_pages(mapping, offset);
	goto out_truncate;

do_expand:
	limit = current->rlim[RLIMIT_FSIZE].rlim_cur;
	if (limit != RLIM_INFINITY && offset > limit)
		goto out_sig;
	if (offset > inode->i_sb->s_maxbytes)
		goto out;
	i_size_write(inode, offset);

out_truncate:
	if (inode->i_op && inode->i_op->truncate)
		inode->i_op->truncate(inode);
	return 0;
out_sig:
	send_sig(SIGXFSZ, current, 0);
out:
	return -EFBIG;
}

EXPORT_SYMBOL(vmtruncate);
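
/*
 * Illustrative sketch (not part of the original file): the usual caller of
 * vmtruncate() is a filesystem's setattr path when the size attribute
 * changes.  The helper name below is hypothetical.
 */
#if 0
static int example_setattr_size(struct inode *inode, struct iattr *attr)
{
	if (attr->ia_valid & ATTR_SIZE)
		return vmtruncate(inode, attr->ia_size); /* -EFBIG on limit */
	return 0;
}
#endif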
/*
 * Return the total memory allocated for this pointer, not
 * just what the caller asked for.
 *
 * Doesn't have to be accurate, i.e. may have races.
 */
unsigned int kobjsize(const void *objp)
{
	struct page *page;

	if (!objp || !((page = virt_to_page(objp))))
		return 0;

	if (PageSlab(page))
		return ksize(objp);

	BUG_ON(page->index < 0);
	BUG_ON(page->index >= MAX_ORDER);

	return (PAGE_SIZE << page->index);
}
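
/*
 * Illustrative sketch (hypothetical helper): kobjsize() is what keeps the
 * askedalloc/realalloc counters honest - it reports the size the allocator
 * actually handed out, e.g. kmalloc(3000) typically yields 4096 here.
 * (Accounting for the matching kfree() is omitted for brevity.)
 */
#if 0
static void example_track_alloc(size_t want)
{
	void *p = kmalloc(want, GFP_KERNEL);

	if (p) {
		askedalloc += want;		/* what the caller asked for */
		realalloc += kobjsize(p);	/* what was really consumed */
		kfree(p);
	}
}
#endif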
/*
 * The nommu dodgy version :-)
 */
int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
	unsigned long start, int len, int write, int force,
	struct page **pages, struct vm_area_struct **vmas)
{
	int i;
	static struct vm_area_struct dummy_vma;

	for (i = 0; i < len; i++) {
		if (pages) {
			pages[i] = virt_to_page(start);
			if (pages[i])
				page_cache_get(pages[i]);
		}
		if (vmas)
			vmas[i] = &dummy_vma;
		start += PAGE_SIZE;
	}
	return(i);
}

EXPORT_SYMBOL(get_user_pages);
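
/*
 * Illustrative sketch (hypothetical): with no MMU, user and kernel share
 * one flat address space, so the loop above just resolves each PAGE_SIZE
 * step with virt_to_page() and pins it.  A driver pinning a user buffer
 * for DMA might do:
 */
#if 0
static int example_pin_buffer(unsigned long uaddr, int npages,
			      struct page **pages)
{
	/* write/force are accepted but ignored by the nommu version */
	return get_user_pages(current, current->mm, uaddr, npages,
			      1, 0, pages, NULL);
}
#endif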
rwlock_t vmlist_lock = RW_LOCK_UNLOCKED;
struct vm_struct *vmlist;

void vfree(void *addr)
{
	kfree(addr);
}

void *__vmalloc(unsigned long size, int gfp_mask, pgprot_t prot)
{
	/*
	 * kmalloc doesn't like __GFP_HIGHMEM for some reason
	 */
	return kmalloc(size, gfp_mask & ~__GFP_HIGHMEM);
}

struct page * vmalloc_to_page(void *addr)
{
	return virt_to_page(addr);
}

long vread(char *buf, char *addr, unsigned long count)
{
	memcpy(buf, addr, count);
	return count;
}

long vwrite(char *buf, char *addr, unsigned long count)
{
	/* Don't allow overflow */
	if ((unsigned long) addr + count < count)
		count = -(unsigned long) addr;

	memcpy(addr, buf, count);
	return(count);
}
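
/*
 * Worked example of the overflow clamp in vwrite(), with illustrative
 * 32-bit values: addr = 0xfffff000 and count = 0x2000 would wrap, since
 * addr + count = 0x1000 < count; count is therefore clamped to
 * -(unsigned long)addr = 0x1000, exactly the bytes left below the top
 * of the address space.
 */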
/*
 *	vmalloc  -  allocate virtually contiguous memory
 *
 *	@size:		allocation size
 *
 *	Allocate enough pages to cover @size from the page level
 *	allocator and map them into contiguous kernel virtual space.
 *
 *	For tight control over the page level allocator and protection flags
 *	use __vmalloc() instead.
 */
void *vmalloc(unsigned long size)
{
	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL);
}

/*
 *	vmalloc_32  -  allocate virtually contiguous memory (32bit addressable)
 *
 *	@size:		allocation size
 *
 *	Allocate enough 32bit PA addressable pages to cover @size from the
 *	page level allocator and map them into contiguous kernel virtual space.
 */
void *vmalloc_32(unsigned long size)
{
	return __vmalloc(size, GFP_KERNEL, PAGE_KERNEL);
}
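
/*
 * Usage sketch (hypothetical): on !CONFIG_MMU both vmalloc() and
 * vmalloc_32() collapse to kmalloc(), so the buffer is physically
 * contiguous and vmalloc_to_page() is a plain virt_to_page().
 */
#if 0
static void example_vmalloc_usage(void)
{
	char *buf = vmalloc(8192);

	if (buf) {
		struct page *pg = vmalloc_to_page(buf); /* first backing page */
		(void)pg;
		vfree(buf);				/* really a kfree() */
	}
}
#endif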
void *vmap(struct page **pages, unsigned int count, unsigned long flags, pgprot_t prot)
{
	BUG();
	return NULL;
}

void vunmap(void *addr)
{
	BUG();
}
/*
 * sys_brk() for the most part doesn't need the global kernel
 * lock, except when an application is doing something nasty
 * like trying to un-brk an area that has already been mapped
 * to a regular file.  In this case, the unmapping will need
 * to invoke file system routines that need the global lock.
 */
asmlinkage unsigned long sys_brk(unsigned long brk)
{
	struct mm_struct *mm = current->mm;

	if (brk < mm->end_code || brk < mm->start_brk || brk > mm->context.end_brk)
		return mm->brk;

	if (mm->brk == brk)
		return mm->brk;

	/*
	 * Always allow shrinking brk
	 */
	if (brk <= mm->brk) {
		mm->brk = brk;
		return brk;
	}

	/*
	 * Ok, looks good - let it rip.
	 */
	return mm->brk = brk;
}
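
/*
 * Userspace view (illustrative, not kernel code): the break can only move
 * within [start_brk, context.end_brk], a region fixed at exec time, so
 * growing past end_brk simply leaves the old break in place - which libc
 * sees as sbrk() failure.  Shrinking always succeeds.
 */
#if 0
#include <unistd.h>

static int example_grow_heap(void)
{
	void *old = sbrk(0);		/* current break */

	if (sbrk(4096) == (void *)-1)	/* grow by one page */
		return -1;		/* hit context.end_brk */
	brk(old);			/* shrinking is always allowed */
	return 0;
}
#endif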
/*
 * Combine the mmap "prot" and "flags" argument into one "vm_flags" used
 * internally. Essentially, translate the "PROT_xxx" and "MAP_xxx" bits
 * into "VM_xxx".
 */
static inline unsigned long calc_vm_flags(unsigned long prot, unsigned long flags)
{
#define _trans(x,bit1,bit2) \
((bit1==bit2)?(x&bit1):(x&bit1)?bit2:0)

	unsigned long prot_bits, flag_bits;
	prot_bits =
		_trans(prot, PROT_READ, VM_READ) |
		_trans(prot, PROT_WRITE, VM_WRITE) |
		_trans(prot, PROT_EXEC, VM_EXEC);
	flag_bits =
		_trans(flags, MAP_GROWSDOWN, VM_GROWSDOWN) |
		_trans(flags, MAP_DENYWRITE, VM_DENYWRITE) |
		_trans(flags, MAP_EXECUTABLE, VM_EXECUTABLE);
	return prot_bits | flag_bits;
#undef _trans
}
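
/*
 * Worked example: on most architectures PROT_READ/WRITE/EXEC have the same
 * values as VM_READ/WRITE/EXEC, so _trans() degenerates to a plain mask and
 *
 *	calc_vm_flags(PROT_READ | PROT_WRITE, MAP_GROWSDOWN)
 *		== VM_READ | VM_WRITE | VM_GROWSDOWN
 *
 * Bits not listed in _trans() (e.g. MAP_FIXED) are silently dropped.
 */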
#ifdef DEBUG
static void show_process_blocks(struct mm_struct * mm)
{
	struct mm_tblock_struct *tblock;

	printk("Process blocks %d:", current->pid);

	for (tblock = &mm->context.tblock; tblock; tblock = tblock->next) {
		printk(" %p: %p", tblock, tblock->rblock);
		if (tblock->rblock)
			printk(" (%d @%p #%d)", kobjsize(tblock->rblock->kblock),
			       tblock->rblock->kblock, tblock->rblock->refcount);
		printk(tblock->next ? " ->" : ".\n");
	}
}
#endif /* DEBUG */
unsigned long do_mmap_pgoff(
	struct file * file,
	unsigned long addr,
	unsigned long len,
	unsigned long prot,
	unsigned long flags,
	unsigned long pgoff)
{
	void * result;
	struct mm_tblock_struct * tblock;
	unsigned int vm_flags;

	/*
	 * Get the !CONFIG_MMU specific checks done first
	 */
	if ((flags & MAP_SHARED) && (prot & PROT_WRITE) && (file)) {
		printk("MAP_SHARED not supported (cannot write mappings to disk)\n");
		return -EINVAL;
	}

	if ((prot & PROT_WRITE) && (flags & MAP_PRIVATE)) {
		printk("Private writable mappings not supported\n");
		return -EINVAL;
	}

	/*
	 * now all the standard checks
	 */
	if (file && (!file->f_op || !file->f_op->mmap))
		return -ENODEV;

	if (PAGE_ALIGN(len) == 0)
		return addr;

	if (len > TASK_SIZE)
		return -EINVAL;

	/* offset overflow? */
	if ((pgoff + (len >> PAGE_SHIFT)) < pgoff)
		return -EINVAL;

	/* Do simple checking here so the lower-level routines won't have
	 * to.  We assume access permissions have been handled by the open
	 * of the memory object, so we don't do any here.
	 */
	vm_flags = calc_vm_flags(prot,flags) /* | mm->def_flags */ | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;

	/*
	 * determine the object being mapped and call the appropriate
	 * specific mapper.
	 */
	if (file) {
		struct vm_area_struct vma;
		int error;

		if (!file->f_op)
			return -ENODEV;

		vma.vm_start = addr;
		vma.vm_end = addr + len;
		vma.vm_flags = vm_flags;
		vma.vm_pgoff = pgoff;

#ifdef CONFIG_MAGIC_ROM_PTR
		/* First, try simpler routine designed to give us a ROM pointer. */

		if (file->f_op->romptr && !(prot & PROT_WRITE)) {
			error = file->f_op->romptr(file, &vma);
#ifdef DEBUG
			printk("romptr mmap returned %d, start 0x%.8x\n", error,
					vma.vm_start);
#endif
			if (!error)
				return vma.vm_start;
			else if (error != -ENOSYS)
				return error;
		} else
#endif /* CONFIG_MAGIC_ROM_PTR */
		/* Then try full mmap routine, which might return a RAM pointer,
		   or do something truly complicated. */

		if (file->f_op->mmap) {
			error = file->f_op->mmap(file, &vma);

#ifdef DEBUG
			printk("f_op->mmap() returned %d/%lx\n", error, vma.vm_start);
#endif
			if (!error)
				return vma.vm_start;
			else if (error != -ENOSYS)
				return error;
		} else
			return -ENODEV; /* No mapping operations defined */

		/* An ENOSYS error indicates that mmap isn't possible (as opposed to
		   tried but failed) so we'll fall through to the copy. */
	}

	tblock = (struct mm_tblock_struct *)
		kmalloc(sizeof(struct mm_tblock_struct), GFP_KERNEL);
	if (!tblock) {
		printk("Allocation of tblock for %lu byte allocation from process %d failed\n", len, current->pid);
		show_free_areas();
		return -ENOMEM;
	}

	tblock->rblock = (struct mm_rblock_struct *)
		kmalloc(sizeof(struct mm_rblock_struct), GFP_KERNEL);

	if (!tblock->rblock) {
		printk("Allocation of rblock for %lu byte allocation from process %d failed\n", len, current->pid);
		show_free_areas();
		kfree(tblock);
		return -ENOMEM;
	}

	result = kmalloc(len, GFP_KERNEL);
	if (!result) {
		printk("Allocation of length %lu from process %d failed\n", len,
				current->pid);
		show_free_areas();
		kfree(tblock->rblock);
		kfree(tblock);
		return -ENOMEM;
	}

	tblock->rblock->refcount = 1;
	tblock->rblock->kblock = result;
	tblock->rblock->size = len;

	realalloc += kobjsize(result);
	askedalloc += len;

#ifdef WARN_ON_SLACK
	if ((len+WARN_ON_SLACK) <= kobjsize(result))
		printk("Allocation of %lu bytes from process %d has %lu bytes of slack\n", len, current->pid, kobjsize(result)-len);
#endif

	if (file) {
		int error;
		mm_segment_t old_fs = get_fs();
		set_fs(KERNEL_DS);
		error = file->f_op->read(file, (char *) result, len, &file->f_pos);
		set_fs(old_fs);
		if (error < 0) {
			kfree(result);
			kfree(tblock->rblock);
			kfree(tblock);
			return error;
		}
		if (error < len)
			memset(result+error, '\0', len-error);
	} else {
		memset(result, '\0', len);
	}

	realalloc += kobjsize(tblock);
	askedalloc += sizeof(struct mm_tblock_struct);

	realalloc += kobjsize(tblock->rblock);
	askedalloc += sizeof(struct mm_rblock_struct);

	tblock->next = current->mm->context.tblock.next;
	current->mm->context.tblock.next = tblock;
	current->mm->total_vm += len >> PAGE_SHIFT;

#ifdef DEBUG
	printk("do_mmap:\n");
	show_process_blocks(current->mm);
#endif

	return (unsigned long)result;
}
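
/*
 * Userspace view (illustrative, not kernel code): only a read-only private
 * file mapping reaches the copy path above - the file is read (from its
 * current f_pos) into a fresh kmalloc() block whose kernel address becomes
 * the "mapping".  MAP_SHARED with PROT_WRITE on a file is rejected outright.
 */
#if 0
#include <sys/mman.h>

static void *example_map_file(int fd, size_t len)
{
	/* the addr hint is ignored by the copy path on nommu */
	return mmap(0, len, PROT_READ, MAP_PRIVATE, fd, 0);
}
#endif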
int do_munmap(struct mm_struct * mm, unsigned long addr, size_t len)
{
	struct mm_tblock_struct * tblock, *tmp;

#ifdef CONFIG_MAGIC_ROM_PTR
	/*
	 * For efficiency's sake, if the pointer is obviously in ROM,
	 * don't bother walking the lists to free it.
	 */
	if (is_in_rom(addr))
		return 0;
#endif /* CONFIG_MAGIC_ROM_PTR */

#ifdef DEBUG
	printk("do_munmap:\n");
#endif

	tmp = &mm->context.tblock; /* dummy head */
	while ((tblock=tmp->next) && tblock->rblock &&
			tblock->rblock->kblock != (void*)addr)
		tmp = tblock;

	if (!tblock) {
		printk("munmap of non-mmaped memory by process %d (%s): %p\n",
				current->pid, current->comm, (void*)addr);
		return -EINVAL;
	}
	if (tblock->rblock) {
		if (!--tblock->rblock->refcount) {
			if (tblock->rblock->kblock) {
				realalloc -= kobjsize(tblock->rblock->kblock);
				askedalloc -= tblock->rblock->size;
				kfree(tblock->rblock->kblock);
			}

			realalloc -= kobjsize(tblock->rblock);
			askedalloc -= sizeof(struct mm_rblock_struct);
			kfree(tblock->rblock);
		}
	}

	tmp->next = tblock->next;
	realalloc -= kobjsize(tblock);
	askedalloc -= sizeof(struct mm_tblock_struct);
	kfree(tblock);
	mm->total_vm -= len >> PAGE_SHIFT;

#ifdef DEBUG
	show_process_blocks(mm);
#endif

	return 0;
}
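
/*
 * Usage note (illustrative): the list walk above matches on the exact
 * kblock pointer, so munmap() must be given precisely the address mmap()
 * returned; an address inside the block yields -EINVAL, and len only
 * adjusts the total_vm accounting - the whole block is always freed.
 */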
/* Release all mmaps. */
void exit_mmap(struct mm_struct * mm)
{
	struct mm_tblock_struct *tmp;

	if (!mm)
		return;
	mm->total_vm = 0;

#ifdef DEBUG
	printk("Exit_mmap:\n");
#endif

	while((tmp = mm->context.tblock.next)) {
		if (tmp->rblock) {
			if (!--tmp->rblock->refcount) {
				if (tmp->rblock->kblock) {
					realalloc -= kobjsize(tmp->rblock->kblock);
					askedalloc -= tmp->rblock->size;
					kfree(tmp->rblock->kblock);
				}
				realalloc -= kobjsize(tmp->rblock);
				askedalloc -= sizeof(struct mm_rblock_struct);
				kfree(tmp->rblock);
			}
			tmp->rblock = 0;
		}
		mm->context.tblock.next = tmp->next;
		realalloc -= kobjsize(tmp);
		askedalloc -= sizeof(struct mm_tblock_struct);
		kfree(tmp);
	}

#ifdef DEBUG
	show_process_blocks(mm);
#endif
}
asmlinkage long sys_munmap(unsigned long addr, size_t len)
{
	int ret;
	struct mm_struct *mm = current->mm;

	down_write(&mm->mmap_sem);
	ret = do_munmap(mm, addr, len);
	up_write(&mm->mmap_sem);
	return ret;
}
unsigned long do_brk(unsigned long addr, unsigned long len)
{
	return -ENOMEM;
}

struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long addr)
{
	return NULL;
}

struct page * follow_page(struct mm_struct *mm, unsigned long addr, int write)
{
	return NULL;
}

struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
{
	return NULL;
}

int remap_page_range(struct vm_area_struct *vma, unsigned long from,
	unsigned long to, unsigned long size, pgprot_t prot)
{
	return -EPERM;
}

unsigned long get_unmapped_area(struct file *file, unsigned long addr,
	unsigned long len, unsigned long pgoff, unsigned long flags)
{
	return -ENOMEM;
}

void swap_unplug_io_fn(struct backing_dev_info *bdi, struct page *page)
{
}

unsigned long arch_get_unmapped_area(struct file *file, unsigned long addr,
	unsigned long len, unsigned long pgoff, unsigned long flags)
{
	return -ENOMEM;
}

void arch_unmap_area(struct vm_area_struct *area)
{
}