Import 2.3.41pre2
[davej-history.git] / arch / sparc64 / mm / asyncd.c
blobb87efd59022182188f5f62d7ce5f9feaf9f8271c
/* $Id: asyncd.c,v 1.12 2000/01/21 11:39:13 jj Exp $
 * The asyncd kernel daemon. This handles paging on behalf of
 * processes that receive page faults due to remote (async) memory
 * accesses.
 *
 * Idea and skeleton code courtesy of David Miller (bless his cotton socks)
 *
 * Implemented by tridge
 */
11 #include <linux/mm.h>
12 #include <linux/malloc.h>
13 #include <linux/sched.h>
14 #include <linux/kernel.h>
15 #include <linux/kernel_stat.h>
16 #include <linux/errno.h>
17 #include <linux/string.h>
18 #include <linux/stat.h>
19 #include <linux/swap.h>
20 #include <linux/fs.h>
21 #include <linux/config.h>
22 #include <linux/interrupt.h>
23 #include <linux/signal.h>
25 #include <asm/dma.h>
26 #include <asm/system.h> /* for cli()/sti() */
27 #include <asm/segment.h> /* for memcpy_to/fromfs */
28 #include <asm/bitops.h>
29 #include <asm/pgalloc.h>
30 #include <asm/pgtable.h>
#define DEBUG		0

/* Heuristics for spotting a task stuck re-faulting one address
 * (see fault_in_page()): after WRITE_LIMIT identical faults we
 * force a write request, after LOOP_LIMIT we fail the request.
 */
#define WRITE_LIMIT	100
#define LOOP_LIMIT	200

/* Activity counters, dumped by asyncd_info(). */
static struct {
	int faults, read, write, success, failure, errors;
} stats;
/*
 * The wait queue for waking up the async daemon:
 */
static DECLARE_WAIT_QUEUE_HEAD(asyncd_wait);
46 struct async_job {
47 volatile struct async_job *next;
48 int taskid;
49 struct mm_struct *mm;
50 unsigned long address;
51 int write;
52 void (*callback)(int,unsigned long,int,int);
55 static volatile struct async_job *async_queue = NULL;
56 static volatile struct async_job *async_queue_end = NULL;
58 static void add_to_async_queue(int taskid,
59 struct mm_struct *mm,
60 unsigned long address,
61 int write,
62 void (*callback)(int,unsigned long,int,int))
64 struct async_job *a = kmalloc(sizeof(*a),GFP_ATOMIC);
66 if (!a) {
67 printk("ERROR: out of memory in asyncd\n");
68 a->callback(taskid,address,write,1);
69 return;
72 if (write)
73 stats.write++;
74 else
75 stats.read++;
77 a->next = NULL;
78 a->taskid = taskid;
79 a->mm = mm;
80 a->address = address;
81 a->write = write;
82 a->callback = callback;
84 if (!async_queue) {
85 async_queue = a;
86 } else {
87 async_queue_end->next = a;
89 async_queue_end = a;
93 void async_fault(unsigned long address, int write, int taskid,
94 void (*callback)(int,unsigned long,int,int))
96 #warning Need some fixing here... -DaveM
97 struct task_struct *tsk = current /* XXX task[taskid] */;
98 struct mm_struct *mm = tsk->mm;
100 stats.faults++;
102 #if 0
103 printk("paging in %x for task=%d\n",address,taskid);
104 #endif
106 add_to_async_queue(taskid, mm, address, write, callback);
107 wake_up(&asyncd_wait);
108 mark_bh(TQUEUE_BH);
111 static int fault_in_page(int taskid,
112 struct vm_area_struct *vma,
113 unsigned long address, int write)
115 static unsigned last_address;
116 static int last_task, loop_counter;
117 siginfo_t info;
118 #warning Need some fixing here... -DaveM
119 struct task_struct *tsk = current /* XXX task[taskid] */;
120 pgd_t *pgd;
121 pmd_t *pmd;
122 pte_t *pte;
124 if (!tsk || !tsk->mm)
125 return 1;
127 if (!vma || (write && !(vma->vm_flags & VM_WRITE)))
128 goto bad_area;
129 if (vma->vm_start > address)
130 goto bad_area;
132 if (address == last_address && taskid == last_task) {
133 loop_counter++;
134 } else {
135 loop_counter = 0;
136 last_address = address;
137 last_task = taskid;
140 if (loop_counter == WRITE_LIMIT && !write) {
141 printk("MSC bug? setting write request\n");
142 stats.errors++;
143 write = 1;
146 if (loop_counter == LOOP_LIMIT) {
147 printk("MSC bug? failing request\n");
148 stats.errors++;
149 return 1;
152 pgd = pgd_offset(vma->vm_mm, address);
153 pmd = pmd_alloc(pgd,address);
154 if(!pmd)
155 goto no_memory;
156 pte = pte_alloc(pmd, address);
157 if(!pte)
158 goto no_memory;
159 if(!pte_present(*pte)) {
160 handle_mm_fault(tsk, vma, address, write);
161 goto finish_up;
163 set_pte(pte, pte_mkyoung(*pte));
164 flush_tlb_page(vma, address);
165 if(!write)
166 goto finish_up;
167 if(pte_write(*pte)) {
168 set_pte(pte, pte_mkdirty(*pte));
169 flush_tlb_page(vma, address);
170 goto finish_up;
172 handle_mm_fault(tsk, vma, address, write);
174 /* Fall through for do_wp_page */
175 finish_up:
176 stats.success++;
177 return 0;
179 no_memory:
180 stats.failure++;
181 oom(tsk);
182 return 1;
184 bad_area:
185 stats.failure++;
186 info.si_signo = SIGSEGV;
187 info.si_errno = 0;
188 info.si_code = SEGV_MAPERR;
189 info.si_addr = (void *)address;
190 info.si_trapno = 0;
191 send_sig_info(SIGSEGV, &info, tsk);
192 return 1;
196 /* Note the semaphore operations must be done here, and _not_
197 * in async_fault().
199 static void run_async_queue(void)
201 int ret;
202 unsigned flags;
204 while (async_queue) {
205 volatile struct async_job *a;
206 struct mm_struct *mm;
207 struct vm_area_struct *vma;
209 save_flags(flags); cli();
210 a = async_queue;
211 async_queue = async_queue->next;
212 restore_flags(flags);
214 mm = a->mm;
216 down(&mm->mmap_sem);
217 vma = find_vma(mm, a->address);
218 ret = fault_in_page(a->taskid,vma,a->address,a->write);
219 #if DEBUG
220 printk("fault_in_page(task=%d addr=%x write=%d) = %d\n",
221 a->taskid,a->address,a->write,ret);
222 #endif
223 a->callback(a->taskid,a->address,a->write,ret);
224 up(&mm->mmap_sem);
225 kfree_s((void *)a,sizeof(*a));
#if CONFIG_AP1000
/* Dump the accumulated asyncd statistics (AP1000 debug key 'a'). */
static void asyncd_info(void)
{
	printk("CID(%d) faults: total=%d read=%d write=%d success=%d fail=%d err=%d\n",
	       mpp_cid(), stats.faults, stats.read, stats.write, stats.success,
	       stats.failure, stats.errors);
}
#endif
241 * The background async daemon.
242 * Started as a kernel thread from the init process.
244 int asyncd(void *unused)
246 current->session = 1;
247 current->pgrp = 1;
248 sprintf(current->comm, "asyncd");
250 sigfillset(&current->blocked); /* block all signals */
251 recalc_sigpending(current);
253 /* Give asyncd a realtime priority. */
254 current->policy = SCHED_FIFO;
255 current->priority = 32; /* Fixme --- we need to standardise our
256 namings for POSIX.4 realtime scheduling
257 priorities. */
259 printk("Started asyncd\n");
261 #if CONFIG_AP1000
262 bif_add_debug_key('a',asyncd_info,"stats on asyncd");
263 #endif
265 while (1) {
266 unsigned flags;
268 save_flags(flags); cli();
270 while (!async_queue) {
271 spin_lock(&current->sigmask_lock);
272 flush_signals(current);
273 spin_unlock(&current->sigmask_lock);
274 interruptible_sleep_on(&asyncd_wait);
275 __sti(); cli(); /* acquire gloabl_irq_lock */
278 restore_flags(flags);
280 run_async_queue();