Merge with Linux 2.3.40.
[linux-2.6/linux-mips.git] / arch / sparc / mm / asyncd.c
blob569940417c42798716a13e3266583df1cbebedbb
1 /* $Id: asyncd.c,v 1.19 2000/01/08 20:22:16 davem Exp $
2 * The asyncd kernel daemon. This handles paging on behalf of
3 * processes that receive page faults due to remote (async) memory
4 * accesses.
6 * Idea and skeleton code courtesy of David Miller (bless his cotton socks)
8 * Implemented by tridge
9 */
11 #include <linux/mm.h>
12 #include <linux/malloc.h>
13 #include <linux/sched.h>
14 #include <linux/kernel.h>
15 #include <linux/kernel_stat.h>
16 #include <linux/errno.h>
17 #include <linux/string.h>
18 #include <linux/stat.h>
19 #include <linux/swap.h>
20 #include <linux/fs.h>
21 #include <linux/config.h>
22 #include <linux/interrupt.h>
24 #include <asm/dma.h>
25 #include <asm/system.h> /* for cli()/sti() */
26 #include <asm/segment.h> /* for memcpy_to/fromfs */
27 #include <asm/bitops.h>
28 #include <asm/pgalloc.h>
29 #include <asm/pgtable.h>
31 #define DEBUG 0
33 #define WRITE_LIMIT 100
34 #define LOOP_LIMIT 200
36 static struct {
37 int faults, read, write, success, failure, errors;
38 } stats;
40 /*
41 * The wait queue for waking up the async daemon:
43 static DECLARE_WAIT_QUEUE_HEAD(asyncd_wait);
45 struct async_job {
46 volatile struct async_job *next;
47 int taskid;
48 struct mm_struct *mm;
49 unsigned long address;
50 int write;
51 void (*callback)(int,unsigned long,int,int);
54 static volatile struct async_job *async_queue = NULL;
55 static volatile struct async_job *async_queue_end = NULL;
57 static void add_to_async_queue(int taskid,
58 struct mm_struct *mm,
59 unsigned long address,
60 int write,
61 void (*callback)(int,unsigned long,int,int))
63 struct async_job *a = kmalloc(sizeof(*a),GFP_ATOMIC);
65 if (!a) {
66 printk("ERROR: out of memory in asyncd\n");
67 a->callback(taskid,address,write,1);
68 return;
71 if (write)
72 stats.write++;
73 else
74 stats.read++;
76 a->next = NULL;
77 a->taskid = taskid;
78 a->mm = mm;
79 a->address = address;
80 a->write = write;
81 a->callback = callback;
83 if (!async_queue) {
84 async_queue = a;
85 } else {
86 async_queue_end->next = a;
88 async_queue_end = a;
92 void async_fault(unsigned long address, int write, int taskid,
93 void (*callback)(int,unsigned long,int,int))
95 struct task_struct *tsk = task[taskid];
96 struct mm_struct *mm = tsk->mm;
98 stats.faults++;
100 #if 0
101 printk("paging in %x for task=%d\n",address,taskid);
102 #endif
104 add_to_async_queue(taskid, mm, address, write, callback);
105 wake_up(&asyncd_wait);
106 mark_bh(TQUEUE_BH);
109 static int fault_in_page(int taskid,
110 struct vm_area_struct *vma,
111 unsigned address,int write)
113 static unsigned last_address;
114 static int last_task, loop_counter;
115 struct task_struct *tsk = task[taskid];
116 pgd_t *pgd;
117 pmd_t *pmd;
118 pte_t *pte;
120 if (!tsk || !tsk->mm)
121 return 1;
123 if (!vma || (write && !(vma->vm_flags & VM_WRITE)))
124 goto bad_area;
125 if (vma->vm_start > address)
126 goto bad_area;
128 if (address == last_address && taskid == last_task) {
129 loop_counter++;
130 } else {
131 loop_counter = 0;
132 last_address = address;
133 last_task = taskid;
136 if (loop_counter == WRITE_LIMIT && !write) {
137 printk("MSC bug? setting write request\n");
138 stats.errors++;
139 write = 1;
142 if (loop_counter == LOOP_LIMIT) {
143 printk("MSC bug? failing request\n");
144 stats.errors++;
145 return 1;
148 pgd = pgd_offset(vma->vm_mm, address);
149 pmd = pmd_alloc(pgd,address);
150 if(!pmd)
151 goto no_memory;
152 pte = pte_alloc(pmd, address);
153 if(!pte)
154 goto no_memory;
155 if(!pte_present(*pte)) {
156 handle_mm_fault(tsk, vma, address, write);
157 goto finish_up;
159 set_pte(pte, pte_mkyoung(*pte));
160 flush_tlb_page(vma, address);
161 if(!write)
162 goto finish_up;
163 if(pte_write(*pte)) {
164 set_pte(pte, pte_mkdirty(*pte));
165 flush_tlb_page(vma, address);
166 goto finish_up;
168 handle_mm_fault(tsk, vma, address, write);
170 /* Fall through for do_wp_page */
171 finish_up:
172 stats.success++;
173 return 0;
175 no_memory:
176 stats.failure++;
177 oom(tsk);
178 return 1;
180 bad_area:
181 stats.failure++;
182 tsk->thread.sig_address = address;
183 tsk->thread.sig_desc = SUBSIG_NOMAPPING;
184 send_sig(SIGSEGV, tsk, 1);
185 return 1;
189 /* Note the semaphore operations must be done here, and _not_
190 * in async_fault().
192 static void run_async_queue(void)
194 int ret;
195 unsigned flags;
197 while (async_queue) {
198 volatile struct async_job *a;
199 struct mm_struct *mm;
200 struct vm_area_struct *vma;
202 save_flags(flags); cli();
203 a = async_queue;
204 async_queue = async_queue->next;
205 restore_flags(flags);
207 mm = a->mm;
209 down(&mm->mmap_sem);
210 vma = find_vma(mm, a->address);
211 ret = fault_in_page(a->taskid,vma,a->address,a->write);
212 #if DEBUG
213 printk("fault_in_page(task=%d addr=%x write=%d) = %d\n",
214 a->taskid,a->address,a->write,ret);
215 #endif
216 a->callback(a->taskid,a->address,a->write,ret);
217 up(&mm->mmap_sem);
218 kfree_s((void *)a,sizeof(*a));
#if CONFIG_AP1000
/* Dump the fault statistics; hooked up as a BIF debug key below. */
static void asyncd_info(void)
{
	printk("CID(%d) faults: total=%d read=%d write=%d success=%d fail=%d err=%d\n",
	       mpp_cid(),stats.faults, stats.read, stats.write, stats.success,
	       stats.failure, stats.errors);
}
#endif
234 * The background async daemon.
235 * Started as a kernel thread from the init process.
237 int asyncd(void *unused)
239 current->session = 1;
240 current->pgrp = 1;
241 sprintf(current->comm, "asyncd");
242 sigfillset(&current->blocked); /* block all signals */
243 recalc_sigpending(current);
245 /* Give asyncd a realtime priority. */
246 current->policy = SCHED_FIFO;
247 current->priority = 32; /* Fixme --- we need to standardise our
248 namings for POSIX.4 realtime scheduling
249 priorities. */
251 printk("Started asyncd\n");
253 #if CONFIG_AP1000
254 bif_add_debug_key('a',asyncd_info,"stats on asyncd");
255 #endif
257 while (1) {
258 unsigned flags;
260 save_flags(flags); cli();
262 while (!async_queue) {
263 spin_lock(&current->sigmask_lock);
264 flush_signals(current);
265 spin_unlock(&current->sigmask_lock);
266 interruptible_sleep_on(&asyncd_wait);
267 __sti(); cli();
270 restore_flags(flags);
272 run_async_queue();
#if CONFIG_AP1000
/* Spawn the asyncd kernel thread at boot (AP1000 builds only). */
static int __init init_ap1000(void)
{
	kernel_thread(asyncd, NULL, CLONE_FS | CLONE_FILES | CLONE_SIGHAND);
	return 0;
}

module_init(init_ap1000)
#endif