drivers/staging/comedi/comedi_buf.c
/*
 * comedi_buf.c
 *
 * COMEDI - Linux Control and Measurement Device Interface
 * Copyright (C) 1997-2000 David A. Schleef <ds@schleef.org>
 * Copyright (C) 2002 Frank Mori Hess <fmhess@users.sourceforge.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/vmalloc.h>
#include <linux/slab.h>

#include "comedidev.h"
#include "comedi_internal.h"

#ifdef PAGE_KERNEL_NOCACHE
#define COMEDI_PAGE_PROTECTION		PAGE_KERNEL_NOCACHE
#else
#define COMEDI_PAGE_PROTECTION		PAGE_KERNEL
#endif

static void comedi_buf_map_kref_release(struct kref *kref)
{
	struct comedi_buf_map *bm =
		container_of(kref, struct comedi_buf_map, refcount);
	struct comedi_buf_page *buf;
	unsigned int i;

	if (bm->page_list) {
		for (i = 0; i < bm->n_pages; i++) {
			buf = &bm->page_list[i];
			clear_bit(PG_reserved,
				  &(virt_to_page(buf->virt_addr)->flags));
			if (bm->dma_dir != DMA_NONE) {
#ifdef CONFIG_HAS_DMA
				dma_free_coherent(bm->dma_hw_dev,
						  PAGE_SIZE,
						  buf->virt_addr,
						  buf->dma_addr);
#endif
			} else {
				free_page((unsigned long)buf->virt_addr);
			}
		}
		vfree(bm->page_list);
	}
	if (bm->dma_dir != DMA_NONE)
		put_device(bm->dma_hw_dev);
	kfree(bm);
}

static void __comedi_buf_free(struct comedi_device *dev,
			      struct comedi_subdevice *s)
{
	struct comedi_async *async = s->async;
	struct comedi_buf_map *bm;
	unsigned long flags;

	if (async->prealloc_buf) {
		vunmap(async->prealloc_buf);
		async->prealloc_buf = NULL;
		async->prealloc_bufsz = 0;
	}

	spin_lock_irqsave(&s->spin_lock, flags);
	bm = async->buf_map;
	async->buf_map = NULL;
	spin_unlock_irqrestore(&s->spin_lock, flags);
	comedi_buf_map_put(bm);
}

static void __comedi_buf_alloc(struct comedi_device *dev,
			       struct comedi_subdevice *s,
			       unsigned n_pages)
{
	struct comedi_async *async = s->async;
	struct page **pages = NULL;
	struct comedi_buf_map *bm;
	struct comedi_buf_page *buf;
	unsigned long flags;
	unsigned i;

	if (!IS_ENABLED(CONFIG_HAS_DMA) && s->async_dma_dir != DMA_NONE) {
		dev_err(dev->class_dev,
			"dma buffer allocation not supported\n");
		return;
	}

	bm = kzalloc(sizeof(*async->buf_map), GFP_KERNEL);
	if (!bm)
		return;

	kref_init(&bm->refcount);
	spin_lock_irqsave(&s->spin_lock, flags);
	async->buf_map = bm;
	spin_unlock_irqrestore(&s->spin_lock, flags);
	bm->dma_dir = s->async_dma_dir;
	if (bm->dma_dir != DMA_NONE)
		/* Need ref to hardware device to free buffer later. */
		bm->dma_hw_dev = get_device(dev->hw_dev);

	bm->page_list = vzalloc(sizeof(*buf) * n_pages);
	if (bm->page_list)
		pages = vmalloc(sizeof(struct page *) * n_pages);

	if (!pages)
		return;

	for (i = 0; i < n_pages; i++) {
		buf = &bm->page_list[i];
		if (bm->dma_dir != DMA_NONE)
#ifdef CONFIG_HAS_DMA
			buf->virt_addr = dma_alloc_coherent(bm->dma_hw_dev,
							    PAGE_SIZE,
							    &buf->dma_addr,
							    GFP_KERNEL |
							    __GFP_COMP);
#else
			break;
#endif
		else
			buf->virt_addr = (void *)get_zeroed_page(GFP_KERNEL);
		if (!buf->virt_addr)
			break;

		set_bit(PG_reserved, &(virt_to_page(buf->virt_addr)->flags));

		pages[i] = virt_to_page(buf->virt_addr);
	}
	spin_lock_irqsave(&s->spin_lock, flags);
	bm->n_pages = i;
	spin_unlock_irqrestore(&s->spin_lock, flags);

	/* vmap the prealloc_buf if all the pages were allocated */
	if (i == n_pages)
		async->prealloc_buf = vmap(pages, n_pages, VM_MAP,
					   COMEDI_PAGE_PROTECTION);

	vfree(pages);
}

void comedi_buf_map_get(struct comedi_buf_map *bm)
{
	if (bm)
		kref_get(&bm->refcount);
}

int comedi_buf_map_put(struct comedi_buf_map *bm)
{
	if (bm)
		return kref_put(&bm->refcount, comedi_buf_map_kref_release);
	return 1;
}

/* returns s->async->buf_map and increments its kref refcount */
struct comedi_buf_map *
comedi_buf_map_from_subdev_get(struct comedi_subdevice *s)
{
	struct comedi_async *async = s->async;
	struct comedi_buf_map *bm = NULL;
	unsigned long flags;

	if (!async)
		return NULL;

	spin_lock_irqsave(&s->spin_lock, flags);
	bm = async->buf_map;
	/* only want it if buffer pages allocated */
	if (bm && bm->n_pages)
		comedi_buf_map_get(bm);
	else
		bm = NULL;
	spin_unlock_irqrestore(&s->spin_lock, flags);

	return bm;
}

bool comedi_buf_is_mmapped(struct comedi_subdevice *s)
{
	struct comedi_buf_map *bm = s->async->buf_map;

	return bm && (atomic_read(&bm->refcount.refcount) > 1);
}

int comedi_buf_alloc(struct comedi_device *dev, struct comedi_subdevice *s,
		     unsigned long new_size)
{
	struct comedi_async *async = s->async;

	/* Round up new_size to multiple of PAGE_SIZE */
	new_size = (new_size + PAGE_SIZE - 1) & PAGE_MASK;

	/* if no change is required, do nothing */
	if (async->prealloc_buf && async->prealloc_bufsz == new_size)
		return 0;

	/* deallocate old buffer */
	__comedi_buf_free(dev, s);

	/* allocate new buffer */
	if (new_size) {
		unsigned n_pages = new_size >> PAGE_SHIFT;

		__comedi_buf_alloc(dev, s, n_pages);

		if (!async->prealloc_buf) {
			/* allocation failed */
			__comedi_buf_free(dev, s);
			return -ENOMEM;
		}
	}
	async->prealloc_bufsz = new_size;

	return 0;
}

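/*
 * Illustrative sketch (not part of this file): roughly how the comedi
 * core is expected to call comedi_buf_alloc() when user space resizes
 * the acquisition buffer.  The helper name is hypothetical; the size is
 * rounded up to whole pages by comedi_buf_alloc() itself, so requesting
 * one byte still yields a full PAGE_SIZE buffer.
 */
#if 0
static int example_resize_buffer(struct comedi_device *dev,
				 struct comedi_subdevice *s,
				 unsigned long new_size)
{
	/* Refuse to resize while user space has the buffer mmapped. */
	if (comedi_buf_is_mmapped(s))
		return -EBUSY;

	return comedi_buf_alloc(dev, s, new_size);
}
#endif
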
void comedi_buf_reset(struct comedi_subdevice *s)
{
	struct comedi_async *async = s->async;

	async->buf_write_alloc_count = 0;
	async->buf_write_count = 0;
	async->buf_read_alloc_count = 0;
	async->buf_read_count = 0;

	async->buf_write_ptr = 0;
	async->buf_read_ptr = 0;

	async->cur_chan = 0;
	async->scans_done = 0;
	async->scan_progress = 0;
	async->munge_chan = 0;
	async->munge_count = 0;
	async->munge_ptr = 0;

	async->events = 0;
}

static unsigned int comedi_buf_write_n_available(struct comedi_subdevice *s)
{
	struct comedi_async *async = s->async;
	unsigned int free_end = async->buf_read_count + async->prealloc_bufsz;

	return free_end - async->buf_write_alloc_count;
}

/* allocates chunk for the writer from free buffer space */
unsigned int comedi_buf_write_alloc(struct comedi_subdevice *s,
				    unsigned int nbytes)
{
	struct comedi_async *async = s->async;
	unsigned int available = comedi_buf_write_n_available(s);

	if (nbytes > available)
		nbytes = available;

	async->buf_write_alloc_count += nbytes;

	/*
	 * ensure the async buffer 'counts' are read and updated
	 * before we write data to the write-alloc'ed buffer space
	 */
	smp_mb();

	return nbytes;
}
EXPORT_SYMBOL_GPL(comedi_buf_write_alloc);

/*
 * munging is applied to data by core as it passes between user
 * and kernel space
 */
static unsigned int comedi_buf_munge(struct comedi_subdevice *s,
				     unsigned int num_bytes)
{
	struct comedi_async *async = s->async;
	unsigned int count = 0;
	const unsigned num_sample_bytes = comedi_bytes_per_sample(s);

	if (!s->munge || (async->cmd.flags & CMDF_RAWDATA)) {
		async->munge_count += num_bytes;
		count = num_bytes;
	} else {
		/* don't munge partial samples */
		num_bytes -= num_bytes % num_sample_bytes;
		while (count < num_bytes) {
			int block_size = num_bytes - count;
			unsigned int buf_end;

			buf_end = async->prealloc_bufsz - async->munge_ptr;
			if (block_size > buf_end)
				block_size = buf_end;

			s->munge(s->device, s,
				 async->prealloc_buf + async->munge_ptr,
				 block_size, async->munge_chan);

			/*
			 * ensure data is munged in buffer before the
			 * async buffer munge_count is incremented
			 */
			smp_wmb();

			async->munge_chan += block_size / num_sample_bytes;
			async->munge_chan %= async->cmd.chanlist_len;
			async->munge_count += block_size;
			async->munge_ptr += block_size;
			async->munge_ptr %= async->prealloc_bufsz;
			count += block_size;
		}
	}

	return count;
}

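/*
 * Illustrative sketch (not part of this file): the kind of per-driver
 * s->munge callback that comedi_buf_munge() invokes on newly written
 * data.  The conversion shown (flipping the sign bit of hypothetical
 * 16-bit two's-complement samples to get offset binary) and the
 * function name are examples only; real drivers do whatever their
 * hardware format requires.
 */
#if 0
static void example_munge(struct comedi_device *dev,
			  struct comedi_subdevice *s, void *data,
			  unsigned int num_bytes, unsigned int start_chan)
{
	unsigned short *samples = data;
	unsigned int nsamples = comedi_bytes_to_samples(s, num_bytes);
	unsigned int i;

	for (i = 0; i < nsamples; i++)
		samples[i] ^= 0x8000;	/* assumes 16-bit samples */
}
#endif
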
unsigned int comedi_buf_write_n_allocated(struct comedi_subdevice *s)
{
	struct comedi_async *async = s->async;

	return async->buf_write_alloc_count - async->buf_write_count;
}

/* transfers a chunk from writer to filled buffer space */
unsigned int comedi_buf_write_free(struct comedi_subdevice *s,
				   unsigned int nbytes)
{
	struct comedi_async *async = s->async;
	unsigned int allocated = comedi_buf_write_n_allocated(s);

	if (nbytes > allocated)
		nbytes = allocated;

	async->buf_write_count += nbytes;
	async->buf_write_ptr += nbytes;
	comedi_buf_munge(s, async->buf_write_count - async->munge_count);
	if (async->buf_write_ptr >= async->prealloc_bufsz)
		async->buf_write_ptr %= async->prealloc_bufsz;

	return nbytes;
}
EXPORT_SYMBOL_GPL(comedi_buf_write_free);

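/*
 * Illustrative sketch (not part of this file): the two-step writer
 * protocol.  A caller (comedi_buf_write_samples() below does the same)
 * first reserves space with comedi_buf_write_alloc(), fills it, then
 * commits it with comedi_buf_write_free() so readers can see it.  The
 * helper name and data source are hypothetical, and wrap-around at the
 * end of the ring is ignored here for brevity (comedi_buf_memcpy_to()
 * below handles it).
 */
#if 0
static void example_push_block(struct comedi_subdevice *s,
			       const void *hw_data, unsigned int nbytes)
{
	struct comedi_async *async = s->async;
	unsigned int n = comedi_buf_write_alloc(s, nbytes);

	/* n may be less than nbytes if the buffer lacked room */
	memcpy(async->prealloc_buf + async->buf_write_ptr, hw_data, n);
	comedi_buf_write_free(s, n);
}
#endif
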
unsigned int comedi_buf_read_n_available(struct comedi_subdevice *s)
{
	struct comedi_async *async = s->async;
	unsigned num_bytes;

	if (!async)
		return 0;

	num_bytes = async->munge_count - async->buf_read_count;

	/*
	 * ensure the async buffer 'counts' are read before we
	 * attempt to read data from the buffer
	 */
	smp_rmb();

	return num_bytes;
}
EXPORT_SYMBOL_GPL(comedi_buf_read_n_available);

/* allocates a chunk for the reader from filled (and munged) buffer space */
unsigned int comedi_buf_read_alloc(struct comedi_subdevice *s,
				   unsigned int nbytes)
{
	struct comedi_async *async = s->async;
	unsigned int available;

	available = async->munge_count - async->buf_read_alloc_count;
	if (nbytes > available)
		nbytes = available;

	async->buf_read_alloc_count += nbytes;

	/*
	 * ensure the async buffer 'counts' are read before we
	 * attempt to read data from the read-alloc'ed buffer space
	 */
	smp_rmb();

	return nbytes;
}
EXPORT_SYMBOL_GPL(comedi_buf_read_alloc);

static unsigned int comedi_buf_read_n_allocated(struct comedi_async *async)
{
	return async->buf_read_alloc_count - async->buf_read_count;
}

/* transfers control of a chunk from reader to free buffer space */
unsigned int comedi_buf_read_free(struct comedi_subdevice *s,
				  unsigned int nbytes)
{
	struct comedi_async *async = s->async;
	unsigned int allocated;

	/*
	 * ensure data has been read out of buffer before
	 * the async read count is incremented
	 */
	smp_mb();

	allocated = comedi_buf_read_n_allocated(async);
	if (nbytes > allocated)
		nbytes = allocated;

	async->buf_read_count += nbytes;
	async->buf_read_ptr += nbytes;
	async->buf_read_ptr %= async->prealloc_bufsz;
	return nbytes;
}
EXPORT_SYMBOL_GPL(comedi_buf_read_free);

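/*
 * Illustrative sketch (not part of this file): the matching reader
 * protocol, roughly what the core read() path and
 * comedi_buf_read_samples() below do.  The helper name is hypothetical
 * and the copy out of the ring is elided for brevity.
 */
#if 0
static unsigned int example_drain(struct comedi_subdevice *s,
				  void *dest, unsigned int want)
{
	unsigned int n = comedi_buf_read_n_available(s);

	if (n > want)
		n = want;
	n = comedi_buf_read_alloc(s, n);
	/*
	 * Copy n bytes out of s->async->prealloc_buf starting at
	 * s->async->buf_read_ptr here (wrap-around handling omitted),
	 * then hand the space back to the writer:
	 */
	comedi_buf_read_free(s, n);
	return n;
}
#endif
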
static void comedi_buf_memcpy_to(struct comedi_subdevice *s,
				 const void *data, unsigned int num_bytes)
{
	struct comedi_async *async = s->async;
	unsigned int write_ptr = async->buf_write_ptr;

	while (num_bytes) {
		unsigned int block_size;

		if (write_ptr + num_bytes > async->prealloc_bufsz)
			block_size = async->prealloc_bufsz - write_ptr;
		else
			block_size = num_bytes;

		memcpy(async->prealloc_buf + write_ptr, data, block_size);

		data += block_size;
		num_bytes -= block_size;

		write_ptr = 0;
	}
}

static void comedi_buf_memcpy_from(struct comedi_subdevice *s,
				   void *dest, unsigned int nbytes)
{
	void *src;
	struct comedi_async *async = s->async;
	unsigned int read_ptr = async->buf_read_ptr;

	while (nbytes) {
		unsigned int block_size;

		src = async->prealloc_buf + read_ptr;

		if (nbytes >= async->prealloc_bufsz - read_ptr)
			block_size = async->prealloc_bufsz - read_ptr;
		else
			block_size = nbytes;

		memcpy(dest, src, block_size);
		nbytes -= block_size;
		dest += block_size;
		read_ptr = 0;
	}
}

/**
 * comedi_buf_write_samples - write sample data to comedi buffer
 * @s: comedi_subdevice struct
 * @data: samples
 * @nsamples: number of samples
 *
 * Writes nsamples to the comedi buffer associated with the subdevice, marks
 * it as written and updates the acquisition scan progress.
 *
 * Returns the amount of data written in bytes.
 */
unsigned int comedi_buf_write_samples(struct comedi_subdevice *s,
				      const void *data, unsigned int nsamples)
{
	unsigned int max_samples;
	unsigned int nbytes;

	/*
	 * Make sure there is enough room in the buffer for all the samples.
	 * If not, clamp the nsamples to the number that will fit, flag the
	 * buffer overrun and add the samples that fit.
	 */
	max_samples = comedi_bytes_to_samples(s,
					      comedi_buf_write_n_available(s));
	if (nsamples > max_samples) {
		dev_warn(s->device->class_dev, "buffer overrun\n");
		s->async->events |= COMEDI_CB_OVERFLOW;
		nsamples = max_samples;
	}

	if (nsamples == 0)
		return 0;

	nbytes = comedi_buf_write_alloc(s,
					comedi_samples_to_bytes(s, nsamples));
	comedi_buf_memcpy_to(s, data, nbytes);
	comedi_buf_write_free(s, nbytes);
	comedi_inc_scan_progress(s, nbytes);
	s->async->events |= COMEDI_CB_BLOCK;

	return nbytes;
}
EXPORT_SYMBOL_GPL(comedi_buf_write_samples);

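/*
 * Illustrative sketch (not part of this file): a typical acquisition
 * interrupt handler pushing hardware samples into the buffer with
 * comedi_buf_write_samples().  The FIFO read helper and function names
 * are hypothetical.
 */
#if 0
static void example_ai_interrupt(struct comedi_device *dev,
				 struct comedi_subdevice *s)
{
	unsigned short sample = example_read_ai_fifo(dev); /* hypothetical */

	comedi_buf_write_samples(s, &sample, 1);

	/* let the core process the COMEDI_CB_* events and wake readers */
	comedi_handle_events(dev, s);
}
#endif
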
/**
 * comedi_buf_read_samples - read sample data from comedi buffer
 * @s: comedi_subdevice struct
 * @data: destination
 * @nsamples: maximum number of samples to read
 *
 * Reads up to nsamples from the comedi buffer associated with the subdevice,
 * marks it as read and updates the acquisition scan progress.
 *
 * Returns the amount of data read in bytes.
 */
unsigned int comedi_buf_read_samples(struct comedi_subdevice *s,
				     void *data, unsigned int nsamples)
{
	unsigned int max_samples;
	unsigned int nbytes;

	/* clamp nsamples to the number of full samples available */
	max_samples = comedi_bytes_to_samples(s,
					      comedi_buf_read_n_available(s));
	if (nsamples > max_samples)
		nsamples = max_samples;

	if (nsamples == 0)
		return 0;

	nbytes = comedi_buf_read_alloc(s,
				       comedi_samples_to_bytes(s, nsamples));
	comedi_buf_memcpy_from(s, data, nbytes);
	comedi_buf_read_free(s, nbytes);
	comedi_inc_scan_progress(s, nbytes);
	s->async->events |= COMEDI_CB_BLOCK;

	return nbytes;
}
EXPORT_SYMBOL_GPL(comedi_buf_read_samples);
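
/*
 * Illustrative sketch (not part of this file): an analog-output driver
 * pulling the next buffered sample to feed its DAC with
 * comedi_buf_read_samples().  The register-write helper and function
 * names are hypothetical; real drivers also check for underrun when
 * nothing is buffered.
 */
#if 0
static void example_ao_interrupt(struct comedi_device *dev,
				 struct comedi_subdevice *s)
{
	unsigned short sample;

	if (comedi_buf_read_samples(s, &sample, 1))
		example_write_ao_fifo(dev, sample);	/* hypothetical */

	comedi_handle_events(dev, s);
}
#endif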