Import 2.3.16
[davej-history.git] / arch / sparc / mm / asyncd.c
blobd17979cd44262426305e2eae4a7963ff07ff3a81
1 /* $Id: asyncd.c,v 1.17 1999/08/14 03:51:44 anton Exp $
2 * The asyncd kernel daemon. This handles paging on behalf of
3 * processes that receive page faults due to remote (async) memory
4 * accesses.
6 * Idea and skeleton code courtesy of David Miller (bless his cotton socks)
8 * Implemented by tridge
9 */
11 #include <linux/mm.h>
12 #include <linux/malloc.h>
13 #include <linux/sched.h>
14 #include <linux/kernel.h>
15 #include <linux/kernel_stat.h>
16 #include <linux/errno.h>
17 #include <linux/string.h>
18 #include <linux/stat.h>
19 #include <linux/swap.h>
20 #include <linux/fs.h>
21 #include <linux/config.h>
22 #include <linux/interrupt.h>
24 #include <asm/dma.h>
25 #include <asm/system.h> /* for cli()/sti() */
26 #include <asm/segment.h> /* for memcpy_to/fromfs */
27 #include <asm/bitops.h>
28 #include <asm/pgtable.h>
30 #define DEBUG 0
32 #define WRITE_LIMIT 100 /* same-address fault count at which a write fault is forced */
33 #define LOOP_LIMIT 200 /* same-address fault count at which the request is failed */
/* Global fault-handling counters, reported by asyncd_info(). */
35 static struct {
36 int faults, read, write, success, failure, errors;
37 } stats;
39 /*
40 * The wait queue for waking up the async daemon:
 */
42 static DECLARE_WAIT_QUEUE_HEAD(asyncd_wait);
/* One queued remote-fault request; lives on the singly linked async_queue. */
44 struct async_job {
45 volatile struct async_job *next;
46 int taskid; /* index into the global task[] array */
47 struct mm_struct *mm;
48 unsigned long address; /* faulting user address */
49 int write; /* non-zero for a write fault */
50 void (*callback)(int,unsigned long,int,int); /* completion notifier: (taskid, address, write, error) */
/* FIFO of pending jobs; appended by async_fault() (interrupt context),
 * drained by asyncd.  Guarded by cli()/restore_flags(), hence volatile. */
53 static volatile struct async_job *async_queue = NULL;
54 static volatile struct async_job *async_queue_end = NULL;
56 static void add_to_async_queue(int taskid,
57 struct mm_struct *mm,
58 unsigned long address,
59 int write,
60 void (*callback)(int,unsigned long,int,int))
62 struct async_job *a = kmalloc(sizeof(*a),GFP_ATOMIC);
64 if (!a) {
65 printk("ERROR: out of memory in asyncd\n");
66 a->callback(taskid,address,write,1);
67 return;
70 if (write)
71 stats.write++;
72 else
73 stats.read++;
75 a->next = NULL;
76 a->taskid = taskid;
77 a->mm = mm;
78 a->address = address;
79 a->write = write;
80 a->callback = callback;
82 if (!async_queue) {
83 async_queue = a;
84 } else {
85 async_queue_end->next = a;
87 async_queue_end = a;
91 void async_fault(unsigned long address, int write, int taskid,
92 void (*callback)(int,unsigned long,int,int))
94 struct task_struct *tsk = task[taskid];
95 struct mm_struct *mm = tsk->mm;
97 stats.faults++;
99 #if 0
100 printk("paging in %x for task=%d\n",address,taskid);
101 #endif
103 add_to_async_queue(taskid, mm, address, write, callback);
104 wake_up(&asyncd_wait);
105 mark_bh(TQUEUE_BH);
/*
 * Fault one page into 'taskid's address space; returns 0 on success,
 * 1 on failure (exited task, bad vma, OOM, or a stuck request).
 * Called from run_async_queue() with mm->mmap_sem held.  The static
 * last_address / last_task / loop_counter variables detect repeated
 * faults on one address (suspected MSC hardware bug), so this function
 * is not reentrant -- only the asyncd thread may call it.
 */
108 static int fault_in_page(int taskid,
109 struct vm_area_struct *vma,
110 unsigned address,int write)
112 static unsigned last_address;
113 static int last_task, loop_counter;
114 struct task_struct *tsk = task[taskid];
115 pgd_t *pgd;
116 pmd_t *pmd;
117 pte_t *pte;
/* Task may have exited since the fault was queued. */
119 if (!tsk || !tsk->mm)
120 return 1;
/* No mapping at all, or a write fault against a read-only vma. */
122 if (!vma || (write && !(vma->vm_flags & VM_WRITE)))
123 goto bad_area;
124 if (vma->vm_start > address)
125 goto bad_area;
/* Count consecutive faults on the same (task, address) pair. */
127 if (address == last_address && taskid == last_task) {
128 loop_counter++;
129 } else {
130 loop_counter = 0;
131 last_address = address;
132 last_task = taskid;
/* Suspected MSC bug: after WRITE_LIMIT repeats, retry as a write fault. */
135 if (loop_counter == WRITE_LIMIT && !write) {
136 printk("MSC bug? setting write request\n");
137 stats.errors++;
138 write = 1;
/* Still looping at LOOP_LIMIT: give up on this request. */
141 if (loop_counter == LOOP_LIMIT) {
142 printk("MSC bug? failing request\n");
143 stats.errors++;
144 return 1;
/* Walk (allocating intermediate levels if needed) the page table. */
147 pgd = pgd_offset(vma->vm_mm, address);
148 pmd = pmd_alloc(pgd,address);
149 if(!pmd)
150 goto no_memory;
151 pte = pte_alloc(pmd, address);
152 if(!pte)
153 goto no_memory;
/* Page not present: let the generic fault handler bring it in. */
154 if(!pte_present(*pte)) {
155 handle_mm_fault(tsk, vma, address, write);
156 goto finish_up;
/* Present: mark referenced (young) and flush the stale TLB entry. */
158 set_pte(pte, pte_mkyoung(*pte));
159 flush_tlb_page(vma, address);
160 if(!write)
161 goto finish_up;
/* Already writable: only the dirty bit needs setting. */
162 if(pte_write(*pte)) {
163 set_pte(pte, pte_mkdirty(*pte));
164 flush_tlb_page(vma, address);
165 goto finish_up;
/* Write to a write-protected pte: generic handler performs the COW. */
167 handle_mm_fault(tsk, vma, address, write);
169 /* Fall through for do_wp_page */
170 finish_up:
171 stats.success++;
172 return 0;
174 no_memory:
175 stats.failure++;
176 oom(tsk);
177 return 1;
/* Bad address: deliver SIGSEGV with sparc-specific fault information. */
179 bad_area:
180 stats.failure++;
181 tsk->thread.sig_address = address;
182 tsk->thread.sig_desc = SUBSIG_NOMAPPING;
183 send_sig(SIGSEGV, tsk, 1);
184 return 1;
188 /* Note the semaphore operations must be done here, and _not_
189 * in async_fault().
 */
191 static void run_async_queue(void)
193 int ret;
194 unsigned flags;
196 while (async_queue) {
197 volatile struct async_job *a;
198 struct mm_struct *mm;
199 struct vm_area_struct *vma;
/* Dequeue the head job with interrupts off -- async_fault() may
 * append to the queue from interrupt context. */
201 save_flags(flags); cli();
202 a = async_queue;
203 async_queue = async_queue->next;
204 restore_flags(flags);
206 mm = a->mm;
/* mmap_sem serialises the vma lookup and the fault itself against
 * concurrent mmap()/munmap() in the target process. */
208 down(&mm->mmap_sem);
209 vma = find_vma(mm, a->address);
210 ret = fault_in_page(a->taskid,vma,a->address,a->write);
211 #if DEBUG
212 printk("fault_in_page(task=%d addr=%x write=%d) = %d\n",
213 a->taskid,a->address,a->write,ret);
214 #endif
/* Tell the originator how the fault went (0 = success, 1 = failure). */
215 a->callback(a->taskid,a->address,a->write,ret);
216 up(&mm->mmap_sem);
217 kfree_s((void *)a,sizeof(*a));
#if CONFIG_AP1000
/* Dump the accumulated asyncd fault statistics (AP1000 debug key 'a'). */
static void asyncd_info(void)
{
	int cid = mpp_cid();

	printk("CID(%d) faults: total=%d read=%d write=%d success=%d fail=%d err=%d\n",
	       cid, stats.faults, stats.read, stats.write,
	       stats.success, stats.failure, stats.errors);
}
#endif
/*
233 * The background async daemon.
234 * Started as a kernel thread from the init process.
 */
236 int asyncd(void *unused)
238 current->session = 1;
239 current->pgrp = 1;
240 sprintf(current->comm, "asyncd");
241 sigfillset(&current->blocked); /* block all signals */
242 recalc_sigpending(current);
244 /* Give asyncd a realtime priority. */
245 current->policy = SCHED_FIFO;
246 current->priority = 32; /* Fixme --- we need to standardise our
247 namings for POSIX.4 realtime scheduling
248 priorities. */
250 printk("Started asyncd\n");
252 #if CONFIG_AP1000
253 bif_add_debug_key('a',asyncd_info,"stats on asyncd");
254 #endif
/* Main service loop: sleep until async_fault() queues work, then
 * drain the queue.  Never returns. */
256 while (1) {
257 unsigned flags;
/* Interrupts are disabled while testing async_queue so a wakeup
 * from interrupt context cannot slip in between the emptiness
 * test and the sleep (classic lost-wakeup avoidance). */
259 save_flags(flags); cli();
261 while (!async_queue) {
/* Discard any pending (blocked) signals so the daemon
 * is never killed and the sleep is not cut short. */
262 spin_lock_irq(&current->sigmask_lock);
263 flush_signals(current);
264 spin_unlock_irq(&current->sigmask_lock);
265 interruptible_sleep_on(&asyncd_wait);
268 restore_flags(flags);
270 run_async_queue();
#if CONFIG_AP1000
/* Boot-time initialiser (AP1000 only): launch the asyncd kernel thread,
 * sharing fs, files and signal handlers with the spawner. */
static int __init init_ap1000(void)
{
	kernel_thread(asyncd, NULL,
		      CLONE_FS | CLONE_FILES | CLONE_SIGHAND);
	return 0;
}
module_init(init_ap1000)
#endif