drivers/staging/comedi/comedi_buf.c
// SPDX-License-Identifier: GPL-2.0+
/*
 * comedi_buf.c
 *
 * COMEDI - Linux Control and Measurement Device Interface
 * Copyright (C) 1997-2000 David A. Schleef <ds@schleef.org>
 * Copyright (C) 2002 Frank Mori Hess <fmhess@users.sourceforge.net>
 */

#include <linux/vmalloc.h>
#include <linux/slab.h>

#include "comedidev.h"
#include "comedi_internal.h"

#ifdef PAGE_KERNEL_NOCACHE
#define COMEDI_PAGE_PROTECTION		PAGE_KERNEL_NOCACHE
#else
#define COMEDI_PAGE_PROTECTION		PAGE_KERNEL
#endif

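/*
 * Page protection used when vmap'ing the buffer pages into the contiguous
 * async->prealloc_buf.  Where the architecture provides
 * PAGE_KERNEL_NOCACHE, the linear mapping is made uncached, presumably so
 * that sample data written into the pages by DMA is also seen coherently
 * through this alias.
 */
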
static void comedi_buf_map_kref_release(struct kref *kref)
{
        struct comedi_buf_map *bm =
                container_of(kref, struct comedi_buf_map, refcount);
        struct comedi_buf_page *buf;
        unsigned int i;

        if (bm->page_list) {
                for (i = 0; i < bm->n_pages; i++) {
                        buf = &bm->page_list[i];
                        clear_bit(PG_reserved,
                                  &(virt_to_page(buf->virt_addr)->flags));
                        if (bm->dma_dir != DMA_NONE) {
#ifdef CONFIG_HAS_DMA
                                dma_free_coherent(bm->dma_hw_dev,
                                                  PAGE_SIZE,
                                                  buf->virt_addr,
                                                  buf->dma_addr);
#endif
                        } else {
                                free_page((unsigned long)buf->virt_addr);
                        }
                }
                vfree(bm->page_list);
        }
        if (bm->dma_dir != DMA_NONE)
                put_device(bm->dma_hw_dev);
        kfree(bm);
}

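/*
 * __comedi_buf_free() - unmap the vmap'ed linear buffer and drop the
 * subdevice's reference to its buffer page map.  The page map itself is
 * only released (via comedi_buf_map_kref_release() above) once any
 * outstanding mmap references have also been dropped.
 */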
static void __comedi_buf_free(struct comedi_device *dev,
                              struct comedi_subdevice *s)
{
        struct comedi_async *async = s->async;
        struct comedi_buf_map *bm;
        unsigned long flags;

        if (async->prealloc_buf) {
                vunmap(async->prealloc_buf);
                async->prealloc_buf = NULL;
                async->prealloc_bufsz = 0;
        }

        spin_lock_irqsave(&s->spin_lock, flags);
        bm = async->buf_map;
        async->buf_map = NULL;
        spin_unlock_irqrestore(&s->spin_lock, flags);
        comedi_buf_map_put(bm);
}

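/*
 * __comedi_buf_alloc() - allocate @n_pages individual pages (coherent DMA
 * pages if the subdevice uses DMA, plain zeroed pages otherwise), record
 * them in a new comedi_buf_map and vmap them into the contiguous
 * async->prealloc_buf that the core addresses linearly.  If anything
 * fails, the partially built map is left for __comedi_buf_free() to tear
 * down from comedi_buf_alloc().
 */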
static void __comedi_buf_alloc(struct comedi_device *dev,
                               struct comedi_subdevice *s,
                               unsigned int n_pages)
{
        struct comedi_async *async = s->async;
        struct page **pages = NULL;
        struct comedi_buf_map *bm;
        struct comedi_buf_page *buf;
        unsigned long flags;
        unsigned int i;

        if (!IS_ENABLED(CONFIG_HAS_DMA) && s->async_dma_dir != DMA_NONE) {
                dev_err(dev->class_dev,
                        "dma buffer allocation not supported\n");
                return;
        }

        bm = kzalloc(sizeof(*async->buf_map), GFP_KERNEL);
        if (!bm)
                return;

        kref_init(&bm->refcount);
        spin_lock_irqsave(&s->spin_lock, flags);
        async->buf_map = bm;
        spin_unlock_irqrestore(&s->spin_lock, flags);
        bm->dma_dir = s->async_dma_dir;
        if (bm->dma_dir != DMA_NONE)
                /* Need ref to hardware device to free buffer later. */
                bm->dma_hw_dev = get_device(dev->hw_dev);

        bm->page_list = vzalloc(sizeof(*buf) * n_pages);
        if (bm->page_list)
                pages = vmalloc(sizeof(struct page *) * n_pages);

        if (!pages)
                return;

        for (i = 0; i < n_pages; i++) {
                buf = &bm->page_list[i];
                if (bm->dma_dir != DMA_NONE)
#ifdef CONFIG_HAS_DMA
                        buf->virt_addr = dma_alloc_coherent(bm->dma_hw_dev,
                                                            PAGE_SIZE,
                                                            &buf->dma_addr,
                                                            GFP_KERNEL |
                                                            __GFP_COMP);
#else
                        break;
#endif
                else
                        buf->virt_addr = (void *)get_zeroed_page(GFP_KERNEL);
                if (!buf->virt_addr)
                        break;

                set_bit(PG_reserved, &(virt_to_page(buf->virt_addr)->flags));

                pages[i] = virt_to_page(buf->virt_addr);
        }
        spin_lock_irqsave(&s->spin_lock, flags);
        bm->n_pages = i;
        spin_unlock_irqrestore(&s->spin_lock, flags);

        /* vmap the prealloc_buf if all the pages were allocated */
        if (i == n_pages)
                async->prealloc_buf = vmap(pages, n_pages, VM_MAP,
                                           COMEDI_PAGE_PROTECTION);

        vfree(pages);
}

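/*
 * comedi_buf_map_get()/comedi_buf_map_put() adjust the reference count of
 * a buffer page map.  The subdevice holds one reference for as long as
 * the buffer is allocated; any additional references come from mmap'ed
 * user mappings or from comedi_buf_map_from_subdev_get() below.
 */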
void comedi_buf_map_get(struct comedi_buf_map *bm)
{
        if (bm)
                kref_get(&bm->refcount);
}

int comedi_buf_map_put(struct comedi_buf_map *bm)
{
        if (bm)
                return kref_put(&bm->refcount, comedi_buf_map_kref_release);
        return 1;
}

/* helper for "access" vm operation */
int comedi_buf_map_access(struct comedi_buf_map *bm, unsigned long offset,
                          void *buf, int len, int write)
{
        unsigned int pgoff = offset_in_page(offset);
        unsigned long pg = offset >> PAGE_SHIFT;
        int done = 0;

        while (done < len && pg < bm->n_pages) {
                int l = min_t(int, len - done, PAGE_SIZE - pgoff);
                void *b = bm->page_list[pg].virt_addr + pgoff;

                if (write)
                        memcpy(b, buf, l);
                else
                        memcpy(buf, b, l);
                buf += l;
                done += l;
                pg++;
                pgoff = 0;
        }
        return done;
}

/* returns s->async->buf_map and increments its kref refcount */
struct comedi_buf_map *
comedi_buf_map_from_subdev_get(struct comedi_subdevice *s)
{
        struct comedi_async *async = s->async;
        struct comedi_buf_map *bm = NULL;
        unsigned long flags;

        if (!async)
                return NULL;

        spin_lock_irqsave(&s->spin_lock, flags);
        bm = async->buf_map;
        /* only want it if buffer pages allocated */
        if (bm && bm->n_pages)
                comedi_buf_map_get(bm);
        else
                bm = NULL;
        spin_unlock_irqrestore(&s->spin_lock, flags);

        return bm;
}

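/*
 * The buffer map is created with a reference count of one, held by the
 * subdevice.  Each mmap of the buffer takes a further reference, so a
 * count greater than one means user space currently has the buffer mapped
 * and it must not be freed or resized.
 */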
bool comedi_buf_is_mmapped(struct comedi_subdevice *s)
{
        struct comedi_buf_map *bm = s->async->buf_map;

        return bm && (kref_read(&bm->refcount) > 1);
}

int comedi_buf_alloc(struct comedi_device *dev, struct comedi_subdevice *s,
                     unsigned long new_size)
{
        struct comedi_async *async = s->async;

        /* Round up new_size to multiple of PAGE_SIZE */
        new_size = (new_size + PAGE_SIZE - 1) & PAGE_MASK;

        /* if no change is required, do nothing */
        if (async->prealloc_buf && async->prealloc_bufsz == new_size)
                return 0;

        /* deallocate old buffer */
        __comedi_buf_free(dev, s);

        /* allocate new buffer */
        if (new_size) {
                unsigned int n_pages = new_size >> PAGE_SHIFT;

                __comedi_buf_alloc(dev, s, n_pages);

                if (!async->prealloc_buf) {
                        /* allocation failed */
                        __comedi_buf_free(dev, s);
                        return -ENOMEM;
                }
        }
        async->prealloc_bufsz = new_size;

        return 0;
}

void comedi_buf_reset(struct comedi_subdevice *s)
{
        struct comedi_async *async = s->async;

        async->buf_write_alloc_count = 0;
        async->buf_write_count = 0;
        async->buf_read_alloc_count = 0;
        async->buf_read_count = 0;

        async->buf_write_ptr = 0;
        async->buf_read_ptr = 0;

        async->cur_chan = 0;
        async->scans_done = 0;
        async->scan_progress = 0;
        async->munge_chan = 0;
        async->munge_count = 0;
        async->munge_ptr = 0;

        async->events = 0;
}

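/*
 * The acquisition buffer is a ring buffer managed through free-running
 * byte counters rather than through the read/write pointers directly:
 *
 *	buf_read_count <= buf_read_alloc_count <= munge_count
 *		<= buf_write_count <= buf_write_alloc_count
 *
 * A writer reserves space (write_alloc), fills it, then commits it
 * (write_free); a reader does the same with read_alloc/read_free.  The
 * counters only ever increase and are compared by subtraction, so the
 * arithmetic below remains correct across unsigned wrap-around.
 */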
static unsigned int comedi_buf_write_n_unalloc(struct comedi_subdevice *s)
{
        struct comedi_async *async = s->async;
        unsigned int free_end = async->buf_read_count + async->prealloc_bufsz;

        return free_end - async->buf_write_alloc_count;
}

unsigned int comedi_buf_write_n_available(struct comedi_subdevice *s)
{
        struct comedi_async *async = s->async;
        unsigned int free_end = async->buf_read_count + async->prealloc_bufsz;

        return free_end - async->buf_write_count;
}

/**
 * comedi_buf_write_alloc() - Reserve buffer space for writing
 * @s: COMEDI subdevice.
 * @nbytes: Maximum space to reserve in bytes.
 *
 * Reserve up to @nbytes bytes of space to be written in the COMEDI acquisition
 * data buffer associated with the subdevice. The amount reserved is limited
 * by the space available.
 *
 * Return: The amount of space reserved in bytes.
 */
unsigned int comedi_buf_write_alloc(struct comedi_subdevice *s,
                                    unsigned int nbytes)
{
        struct comedi_async *async = s->async;
        unsigned int unalloc = comedi_buf_write_n_unalloc(s);

        if (nbytes > unalloc)
                nbytes = unalloc;

        async->buf_write_alloc_count += nbytes;

        /*
         * ensure the async buffer 'counts' are read and updated
         * before we write data to the write-alloc'ed buffer space
         */
        smp_mb();

        return nbytes;
}
EXPORT_SYMBOL_GPL(comedi_buf_write_alloc);

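/*
 * comedi_buf_write_alloc()/comedi_buf_write_free() are exported for
 * drivers that fill the reserved region themselves (for example by DMA
 * directly into the buffer pages).  Drivers that simply have samples to
 * copy in typically use the higher-level comedi_buf_write_samples()
 * below, which reserves, copies and commits in one call.
 */
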
/*
 * munging is applied to data by core as it passes between user
 * and kernel space
 */
static unsigned int comedi_buf_munge(struct comedi_subdevice *s,
                                     unsigned int num_bytes)
{
        struct comedi_async *async = s->async;
        unsigned int count = 0;
        const unsigned int num_sample_bytes = comedi_bytes_per_sample(s);

        if (!s->munge || (async->cmd.flags & CMDF_RAWDATA)) {
                async->munge_count += num_bytes;
                count = num_bytes;
        } else {
                /* don't munge partial samples */
                num_bytes -= num_bytes % num_sample_bytes;
                while (count < num_bytes) {
                        int block_size = num_bytes - count;
                        unsigned int buf_end;

                        buf_end = async->prealloc_bufsz - async->munge_ptr;
                        if (block_size > buf_end)
                                block_size = buf_end;

                        s->munge(s->device, s,
                                 async->prealloc_buf + async->munge_ptr,
                                 block_size, async->munge_chan);

                        /*
                         * ensure data is munged in buffer before the
                         * async buffer munge_count is incremented
                         */
                        smp_wmb();

                        async->munge_chan += block_size / num_sample_bytes;
                        async->munge_chan %= async->cmd.chanlist_len;
                        async->munge_count += block_size;
                        async->munge_ptr += block_size;
                        async->munge_ptr %= async->prealloc_bufsz;
                        count += block_size;
                }
        }

        return count;
}

unsigned int comedi_buf_write_n_allocated(struct comedi_subdevice *s)
{
        struct comedi_async *async = s->async;

        return async->buf_write_alloc_count - async->buf_write_count;
}

/**
 * comedi_buf_write_free() - Free buffer space after it is written
 * @s: COMEDI subdevice.
 * @nbytes: Maximum space to free in bytes.
 *
 * Free up to @nbytes bytes of space previously reserved for writing in the
 * COMEDI acquisition data buffer associated with the subdevice. The amount of
 * space freed is limited to the amount that was reserved. The freed space is
 * assumed to have been filled with sample data by the writer.
 *
 * If the samples in the freed space need to be "munged", do so here. The
 * freed space becomes available for allocation by the reader.
 *
 * Return: The amount of space freed in bytes.
 */
unsigned int comedi_buf_write_free(struct comedi_subdevice *s,
                                   unsigned int nbytes)
{
        struct comedi_async *async = s->async;
        unsigned int allocated = comedi_buf_write_n_allocated(s);

        if (nbytes > allocated)
                nbytes = allocated;

        async->buf_write_count += nbytes;
        async->buf_write_ptr += nbytes;
        comedi_buf_munge(s, async->buf_write_count - async->munge_count);
        if (async->buf_write_ptr >= async->prealloc_bufsz)
                async->buf_write_ptr %= async->prealloc_bufsz;

        return nbytes;
}
EXPORT_SYMBOL_GPL(comedi_buf_write_free);

/**
 * comedi_buf_read_n_available() - Determine amount of readable buffer space
 * @s: COMEDI subdevice.
 *
 * Determine the amount of readable buffer space in the COMEDI acquisition data
 * buffer associated with the subdevice. The readable buffer space is that
 * which has been freed by the writer and "munged" to the sample data format
 * expected by COMEDI if necessary.
 *
 * Return: The amount of readable buffer space.
 */
unsigned int comedi_buf_read_n_available(struct comedi_subdevice *s)
{
        struct comedi_async *async = s->async;
        unsigned int num_bytes;

        if (!async)
                return 0;

        num_bytes = async->munge_count - async->buf_read_count;

        /*
         * ensure the async buffer 'counts' are read before we
         * attempt to read data from the buffer
         */
        smp_rmb();

        return num_bytes;
}
EXPORT_SYMBOL_GPL(comedi_buf_read_n_available);

/**
 * comedi_buf_read_alloc() - Reserve buffer space for reading
 * @s: COMEDI subdevice.
 * @nbytes: Maximum space to reserve in bytes.
 *
 * Reserve up to @nbytes bytes of previously written and "munged" buffer space
 * for reading in the COMEDI acquisition data buffer associated with the
 * subdevice. The amount reserved is limited to the space available. The
 * reader can read from the reserved space and then free it. A reader is also
 * allowed to read from the space before reserving it as long as it determines
 * the amount of readable data available, but the space needs to be marked as
 * reserved before it can be freed.
 *
 * Return: The amount of space reserved in bytes.
 */
unsigned int comedi_buf_read_alloc(struct comedi_subdevice *s,
                                   unsigned int nbytes)
{
        struct comedi_async *async = s->async;
        unsigned int available;

        available = async->munge_count - async->buf_read_alloc_count;
        if (nbytes > available)
                nbytes = available;

        async->buf_read_alloc_count += nbytes;

        /*
         * ensure the async buffer 'counts' are read before we
         * attempt to read data from the read-alloc'ed buffer space
         */
        smp_rmb();

        return nbytes;
}
EXPORT_SYMBOL_GPL(comedi_buf_read_alloc);

static unsigned int comedi_buf_read_n_allocated(struct comedi_async *async)
{
        return async->buf_read_alloc_count - async->buf_read_count;
}

/**
 * comedi_buf_read_free() - Free buffer space after it has been read
 * @s: COMEDI subdevice.
 * @nbytes: Maximum space to free in bytes.
 *
 * Free up to @nbytes bytes of buffer space previously reserved for reading in
 * the COMEDI acquisition data buffer associated with the subdevice. The
 * amount of space freed is limited to the amount that was reserved.
 *
 * The freed space becomes available for allocation by the writer.
 *
 * Return: The amount of space freed in bytes.
 */
unsigned int comedi_buf_read_free(struct comedi_subdevice *s,
                                  unsigned int nbytes)
{
        struct comedi_async *async = s->async;
        unsigned int allocated;

        /*
         * ensure data has been read out of buffer before
         * the async read count is incremented
         */
        smp_mb();

        allocated = comedi_buf_read_n_allocated(async);
        if (nbytes > allocated)
                nbytes = allocated;

        async->buf_read_count += nbytes;
        async->buf_read_ptr += nbytes;
        async->buf_read_ptr %= async->prealloc_bufsz;
        return nbytes;
}
EXPORT_SYMBOL_GPL(comedi_buf_read_free);

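/*
 * comedi_buf_memcpy_to()/comedi_buf_memcpy_from() copy a linear block of
 * sample data to/from the ring buffer, splitting the copy where it would
 * run past the end of prealloc_buf and wrapping back to the start.
 * Callers are expected to have reserved the space beforehand with
 * comedi_buf_write_alloc() or comedi_buf_read_alloc().
 */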
static void comedi_buf_memcpy_to(struct comedi_subdevice *s,
                                 const void *data, unsigned int num_bytes)
{
        struct comedi_async *async = s->async;
        unsigned int write_ptr = async->buf_write_ptr;

        while (num_bytes) {
                unsigned int block_size;

                if (write_ptr + num_bytes > async->prealloc_bufsz)
                        block_size = async->prealloc_bufsz - write_ptr;
                else
                        block_size = num_bytes;

                memcpy(async->prealloc_buf + write_ptr, data, block_size);

                data += block_size;
                num_bytes -= block_size;

                write_ptr = 0;
        }
}

static void comedi_buf_memcpy_from(struct comedi_subdevice *s,
                                   void *dest, unsigned int nbytes)
{
        void *src;
        struct comedi_async *async = s->async;
        unsigned int read_ptr = async->buf_read_ptr;

        while (nbytes) {
                unsigned int block_size;

                src = async->prealloc_buf + read_ptr;

                if (nbytes >= async->prealloc_bufsz - read_ptr)
                        block_size = async->prealloc_bufsz - read_ptr;
                else
                        block_size = nbytes;

                memcpy(dest, src, block_size);
                nbytes -= block_size;
                dest += block_size;

                read_ptr = 0;
        }
}

/**
 * comedi_buf_write_samples() - Write sample data to COMEDI buffer
 * @s: COMEDI subdevice.
 * @data: Pointer to source samples.
 * @nsamples: Number of samples to write.
 *
 * Write up to @nsamples samples to the COMEDI acquisition data buffer
 * associated with the subdevice, mark it as written and update the
 * acquisition scan progress. If there is not enough room for the specified
 * number of samples, the number of samples written is limited to the number
 * that will fit and the %COMEDI_CB_OVERFLOW event flag is set to cause the
 * acquisition to terminate with an overrun error. Set the %COMEDI_CB_BLOCK
 * event flag if any samples are written to cause waiting tasks to be woken
 * when the event flags are processed.
 *
 * Return: The amount of data written in bytes.
 */
unsigned int comedi_buf_write_samples(struct comedi_subdevice *s,
                                      const void *data, unsigned int nsamples)
{
        unsigned int max_samples;
        unsigned int nbytes;

        /*
         * Make sure there is enough room in the buffer for all the samples.
         * If not, clamp the nsamples to the number that will fit, flag the
         * buffer overrun and add the samples that fit.
         */
        max_samples = comedi_bytes_to_samples(s, comedi_buf_write_n_unalloc(s));
        if (nsamples > max_samples) {
                dev_warn(s->device->class_dev, "buffer overrun\n");
                s->async->events |= COMEDI_CB_OVERFLOW;
                nsamples = max_samples;
        }

        if (nsamples == 0)
                return 0;

        nbytes = comedi_buf_write_alloc(s,
                                        comedi_samples_to_bytes(s, nsamples));
        comedi_buf_memcpy_to(s, data, nbytes);
        comedi_buf_write_free(s, nbytes);
        comedi_inc_scan_progress(s, nbytes);
        s->async->events |= COMEDI_CB_BLOCK;

        return nbytes;
}
EXPORT_SYMBOL_GPL(comedi_buf_write_samples);

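/*
 * Typical producer-side usage (an illustrative sketch only, not part of
 * this file): an acquisition driver's interrupt handler pushes the sample
 * it has just read from hardware, then lets the core process any event
 * flags that were raised.  mydev_read_fifo() is hypothetical;
 * comedi_buf_write_samples() and comedi_handle_events() are the real core
 * helpers.
 *
 *	static irqreturn_t mydev_interrupt(int irq, void *d)
 *	{
 *		struct comedi_device *dev = d;
 *		struct comedi_subdevice *s = dev->read_subdev;
 *		unsigned short sample = mydev_read_fifo(dev);
 *
 *		comedi_buf_write_samples(s, &sample, 1);
 *		comedi_handle_events(dev, s);
 *		return IRQ_HANDLED;
 *	}
 */
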
/**
 * comedi_buf_read_samples() - Read sample data from COMEDI buffer
 * @s: COMEDI subdevice.
 * @data: Pointer to destination.
 * @nsamples: Maximum number of samples to read.
 *
 * Read up to @nsamples samples from the COMEDI acquisition data buffer
 * associated with the subdevice, mark it as read and update the acquisition
 * scan progress. Limit the number of samples read to the number available.
 * Set the %COMEDI_CB_BLOCK event flag if any samples are read to cause waiting
 * tasks to be woken when the event flags are processed.
 *
 * Return: The amount of data read in bytes.
 */
unsigned int comedi_buf_read_samples(struct comedi_subdevice *s,
                                     void *data, unsigned int nsamples)
{
        unsigned int max_samples;
        unsigned int nbytes;

        /* clamp nsamples to the number of full samples available */
        max_samples = comedi_bytes_to_samples(s,
                                              comedi_buf_read_n_available(s));
        if (nsamples > max_samples)
                nsamples = max_samples;

        if (nsamples == 0)
                return 0;

        nbytes = comedi_buf_read_alloc(s,
                                       comedi_samples_to_bytes(s, nsamples));
        comedi_buf_memcpy_from(s, data, nbytes);
        comedi_buf_read_free(s, nbytes);
        comedi_inc_scan_progress(s, nbytes);
        s->async->events |= COMEDI_CB_BLOCK;

        return nbytes;
}
EXPORT_SYMBOL_GPL(comedi_buf_read_samples);
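
/*
 * Consumer-side counterpart (an illustrative sketch only): an analog
 * output driver pulls the samples queued by user space and writes them
 * to the hardware.  mydev_write_dac() is hypothetical;
 * comedi_buf_read_samples() is the real core helper.
 *
 *	static void mydev_ao_load_fifo(struct comedi_device *dev,
 *				       struct comedi_subdevice *s)
 *	{
 *		unsigned short sample;
 *
 *		while (comedi_buf_read_samples(s, &sample, 1))
 *			mydev_write_dac(dev, sample);
 *	}
 */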