/*
 * Copyright 1996 The Australian National University.
 * Copyright 1996 Fujitsu Laboratories Limited
 *
 * This software may be distributed under the terms of the Gnu
 * Public License version 2 or later
 */
/*
 * linux/drivers/ap1000/ringbuf.c
 *
 * This provides the /proc/XX/ringbuf interface to the Tnet ring buffer
 */
#define _APLIB_
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/malloc.h>
#include <linux/mm.h>

#include <asm/page.h>
#include <asm/segment.h>
#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/uaccess.h>

#include <asm/ap1000/pgtapmmu.h>
#include <asm/ap1000/apreg.h>
#include <asm/ap1000/apservice.h>
/* we have a small number of reserved ring buffers to ensure that at
   least one parallel program can always run */
#define RBUF_RESERVED 4
#define RBUF_RESERVED_ORDER 5

static struct {
	char *rb_ptr;
	char *shared_ptr;
	int used;
} reserved_ringbuf[RBUF_RESERVED];
void ap_ringbuf_init(void)
{
	int i,j;
	char *rb_ptr, *shared_ptr;
	int rb_size = PAGE_SIZE * (1<<RBUF_RESERVED_ORDER);

	/* preallocate some ringbuffers */
	for (i=0;i<RBUF_RESERVED;i++) {
		if (!(rb_ptr = (char *)__get_free_pages(GFP_ATOMIC,RBUF_RESERVED_ORDER))) {
			printk("failed to preallocate ringbuf %d\n",i);
			return;
		}

		for (j = MAP_NR(rb_ptr); j <= MAP_NR(rb_ptr+rb_size-1); j++) {
			set_bit(PG_reserved,&mem_map[j].flags);
		}

		if (!(shared_ptr = (char *)__get_free_page(GFP_ATOMIC))) {
			printk("failed to preallocate shared ptr %d\n",i);
			return;
		}
		set_bit(PG_reserved,&mem_map[MAP_NR(shared_ptr)].flags);

		reserved_ringbuf[i].used = 0;
		reserved_ringbuf[i].rb_ptr = rb_ptr;
		reserved_ringbuf[i].shared_ptr = shared_ptr;
	}
}
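/*
 * The PG_reserved bit is set here (and in cap_map() below) on every
 * page backing a ring buffer because the pages are later handed to
 * user space with remap_page_range(); marking them reserved keeps the
 * VM system from treating them as ordinary pageable memory while a
 * user mapping still references them.
 */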
void exit_ringbuf(struct task_struct *tsk)
{
	int i;

	if (!tsk->ringbuf) return;

	if (tsk->ringbuf->ringbuf) {
		char *rb_ptr = tsk->ringbuf->ringbuf;
		char *shared_ptr = tsk->ringbuf->shared;
		int order = tsk->ringbuf->order;
		int rb_size = PAGE_SIZE * (1<<order);

		for (i=0;i<RBUF_RESERVED;i++)
			if (rb_ptr == reserved_ringbuf[i].rb_ptr) break;

		if (i < RBUF_RESERVED) {
			/* a preallocated buffer - just mark it free again */
			reserved_ringbuf[i].used = 0;
		} else {
			/* unreserve and free the dynamically allocated pages */
			for (i = MAP_NR(rb_ptr); i <= MAP_NR(rb_ptr+rb_size-1); i++) {
				clear_bit(PG_reserved, &mem_map[i].flags);
			}
			free_pages((unsigned)rb_ptr,order);

			i = MAP_NR(shared_ptr);
			clear_bit(PG_reserved,&mem_map[i].flags);
			free_page((unsigned)shared_ptr);
		}
	}

	kfree_s(tsk->ringbuf,sizeof(*(tsk->ringbuf)));
	tsk->ringbuf = NULL;
}
/*
 * map the ring buffer into the user's memory
 */
static int cap_map(int rb_size)
{
	struct task_struct *tsk=current;
	int i;
	char *rb_ptr=NULL;
	char *shared_ptr=NULL;
	int order = 0;
	int error,old_uid;

	error = verify_area(VERIFY_WRITE,(char *)RBUF_VBASE,rb_size);
	if (error) return error;

	if (!MPP_IS_PAR_TASK(tsk->taskid)) {
		printk("ringbuf_mmap called from non-parallel task\n");
		return -EINVAL;
	}

	if (tsk->ringbuf) return -EINVAL;

	/* the requested size covers the header area plus two identical
	   views of the ring, so halve it to get the ring size itself */
	rb_size -= RBUF_RING_BUFFER_OFFSET;
	rb_size >>= 1;
	switch (rb_size/1024) {
	case 128:
		order = 5;
		break;
	case 512:
		order = 7;
		break;
	case 2048:
		order = 9;
		break;
	case 8192:
		order = 11;
		break;
	default:
		printk("ringbuf_mmap with invalid size %d\n",rb_size);
		return -EINVAL;
	}
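	/*
	 * The size/order table above follows directly from the page
	 * size: assuming the usual 4KB sparc page, order 5 is 2^5 = 32
	 * pages = 128KB, order 7 is 512KB, order 9 is 2MB and order 11
	 * is 8MB, i.e. order = log2(rb_size / PAGE_SIZE) for each
	 * supported ring size.
	 */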
	if (order == RBUF_RESERVED_ORDER) {
		for (i=0;i<RBUF_RESERVED;i++)
			if (!reserved_ringbuf[i].used) {
				rb_ptr = reserved_ringbuf[i].rb_ptr;
				shared_ptr = reserved_ringbuf[i].shared_ptr;
				reserved_ringbuf[i].used = 1;
				break;
			}
	}

	if (!rb_ptr) {
		rb_ptr = (char *)__get_free_pages(GFP_USER,order);
		if (!rb_ptr) return -ENOMEM;

		for (i = MAP_NR(rb_ptr); i <= MAP_NR(rb_ptr+rb_size-1); i++) {
			set_bit(PG_reserved,&mem_map[i].flags);
		}

		shared_ptr = (char *)__get_free_page(GFP_USER);
		if (!shared_ptr)
			return -ENOMEM;
		set_bit(PG_reserved,&mem_map[MAP_NR(shared_ptr)].flags);
	}

	if (!rb_ptr)
		return -ENOMEM;
	memset(rb_ptr,0,rb_size);
	memset(shared_ptr,0,PAGE_SIZE);

	if (remap_page_range(RBUF_VBASE + RBUF_RING_BUFFER_OFFSET,
			     mmu_v2p((unsigned)rb_ptr),
			     rb_size,APMMU_PAGE_SHARED))
		return -EAGAIN;

	if (remap_page_range(RBUF_VBASE + RBUF_RING_BUFFER_OFFSET + rb_size,
			     mmu_v2p((unsigned)rb_ptr),
			     rb_size,APMMU_PAGE_SHARED))
		return -EAGAIN;

	/* the shared area */
	if (remap_page_range(RBUF_VBASE + RBUF_SHARED_PAGE_OFF,
			     mmu_v2p((unsigned)shared_ptr),
			     PAGE_SIZE,APMMU_PAGE_SHARED))
		return -EAGAIN;
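	/*
	 * The ring is mapped twice, back to back, by the first two
	 * remap_page_range() calls above.  A message that wraps past
	 * the end of the ring can therefore be copied as one virtually
	 * contiguous block through the second view.  A minimal sketch
	 * of the idea (hypothetical msg/off/len, not part of this
	 * driver):
	 */
#if 0
	char *ring = (char *)(RBUF_VBASE + RBUF_RING_BUFFER_OFFSET);
	/* no wraparound test needed even when off+len > rb_size:
	   ring[rb_size..2*rb_size-1] aliases ring[0..rb_size-1] */
	memcpy(msg, ring + off, len);
#endif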
#if 0
	/* lock the ringbuffer in memory */
	old_uid = current->euid;
	current->euid = 0;
	error = sys_mlock(RBUF_VBASE,2*rb_size+RBUF_RING_BUFFER_OFFSET);
	current->euid = old_uid;
	if (error) {
		printk("ringbuffer mlock failed\n");
		return error;
	}
#endif
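	/*
	 * The disabled block above briefly swaps in euid 0, evidently
	 * because sys_mlock() in kernels of this era only lets a
	 * privileged (or suitably resourced) task lock memory; the
	 * temporary root euid is meant to get this kernel-initiated
	 * lock past those checks.
	 */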
	/* the queue pages */
#define MAP_QUEUE(offset,phys) \
	io_remap_page_range(RBUF_VBASE + offset, \
			    phys<<PAGE_SHIFT,PAGE_SIZE,APMMU_PAGE_SHARED,0xa)

	MAP_QUEUE(RBUF_PUT_QUEUE,  0x00000);
	MAP_QUEUE(RBUF_GET_QUEUE,  0x00001);
	MAP_QUEUE(RBUF_SEND_QUEUE, 0x00040);
	MAP_QUEUE(RBUF_XY_QUEUE,   0x00640);
	MAP_QUEUE(RBUF_X_QUEUE,    0x00240);
	MAP_QUEUE(RBUF_Y_QUEUE,    0x00440);
	MAP_QUEUE(RBUF_XYG_QUEUE,  0x00600);
	MAP_QUEUE(RBUF_XG_QUEUE,   0x00200);
	MAP_QUEUE(RBUF_YG_QUEUE,   0x00400);
	MAP_QUEUE(RBUF_CSI_QUEUE,  0x02004);
	MAP_QUEUE(RBUF_FOP_QUEUE,  0x02005);

#undef MAP_QUEUE
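	/*
	 * The queue "pages" mapped above are not RAM: the physical
	 * page numbers select Tnet message-queue registers, pushed
	 * into the user's address space with io_remap_page_range()
	 * (the trailing 0xa names the I/O space they live in),
	 * evidently so a parallel task can drive the message queues
	 * without entering the kernel.
	 */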
	if (!tsk->ringbuf) {
		tsk->ringbuf = (void *)kmalloc(sizeof(*(tsk->ringbuf)),GFP_ATOMIC);
		if (!tsk->ringbuf)
			return -ENOMEM;
	}

	memset(tsk->ringbuf,0,sizeof(*tsk->ringbuf));
	tsk->ringbuf->ringbuf = rb_ptr;
	tsk->ringbuf->shared = shared_ptr;
	tsk->ringbuf->order = order;
	tsk->ringbuf->write_ptr = mmu_v2p((unsigned)rb_ptr)<<1;
	tsk->ringbuf->vaddr = RBUF_VBASE;

	memset(tsk->ringbuf->vaddr+RBUF_SHARED_PAGE_OFF,0,PAGE_SIZE);

	{
		struct _kernel_cap_shared *_kernel =
			(struct _kernel_cap_shared *)tsk->ringbuf->vaddr;

		/* start the reader at the index of the last 32-byte cell */
		_kernel->rbuf_read_ptr = (rb_size>>5) - 1;
	}

	return 0;
}
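/*
 * Once cap_map() succeeds the task keeps the buffer for life: a
 * second CAP_MAP fails with -EINVAL (tsk->ringbuf is already set),
 * and the pages are only freed, or a reserved buffer marked unused
 * again, by exit_ringbuf() at task exit.
 */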
static int
ringbuf_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg)
{
	int numcells, *phys_cells;
	extern struct cap_init cap_init;

	switch (cmd) {
	case CAP_GETINIT:
		if (copy_to_user((char *)arg,(char *)&cap_init,sizeof(cap_init)))
			return -EFAULT;
		break;

	case CAP_SYNC:
		/* arg points at { int numcells; int *cells; } in user space */
		if (verify_area(VERIFY_READ, (void *) arg, sizeof(int)*2))
			return -EFAULT;
		if (get_user(numcells,(int *)arg)) return -EFAULT;
		if (get_user(phys_cells,(int **)arg+1)) return -EFAULT;
		if (verify_area(VERIFY_READ,phys_cells,sizeof(int)*numcells))
			return -EFAULT;
		return ap_sync(numcells,phys_cells);

	case CAP_SETGANG:
	{
		int v;
		if (get_user(v,(int *)arg)) return -EFAULT;
		mpp_set_gang_factor(v);
		break;
	}

	case CAP_MAP:
		return cap_map(arg);

	default:
		printk("unknown ringbuf ioctl %d\n",cmd);
		return -EINVAL;
	}

	return 0;
}
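/*
 * A user-level sketch of this ioctl interface (hypothetical fd, size
 * and cell list; CAP_* and RBUF_* come from the ap1000 headers, and
 * "XX" in the /proc path is the task's id as in the header comment):
 */
#if 0
	int cells[2] = { 0, 1 };	/* physical cell ids to sync with */
	struct { int numcells; int *cells; } sync_arg = { 2, cells };
	int fd = open("/proc/XX/ringbuf", O_RDWR);

	/* map the header plus two 128KB views of the ring */
	ioctl(fd, CAP_MAP, RBUF_RING_BUFFER_OFFSET + 2*(128*1024));
	/* barrier with the listed cells */
	ioctl(fd, CAP_SYNC, (unsigned long)&sync_arg);
#endif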
static struct file_operations proc_ringbuf_operations = {
	NULL,			/* lseek */
	NULL,			/* read */
	NULL,			/* write */
	NULL,			/* readdir */
	NULL,			/* poll */
	ringbuf_ioctl,		/* ioctl */
	NULL,			/* mmap */
	NULL,			/* no special open code */
	NULL,			/* flush */
	NULL,			/* no special release code */
	NULL			/* can't fsync */
};
struct inode_operations proc_ringbuf_inode_operations = {
	&proc_ringbuf_operations,	/* default base directory file-ops */
	NULL,			/* create */
	NULL,			/* lookup */
	NULL,			/* link */
	NULL,			/* unlink */
	NULL,			/* symlink */
	NULL,			/* mkdir */
	NULL,			/* rmdir */
	NULL,			/* mknod */
	NULL,			/* rename */
	NULL,			/* readlink */
	NULL,			/* readpage */
	NULL,			/* writepage */
	NULL,			/* bmap */
	NULL,			/* truncate */
	NULL			/* permission */
};