Revert "ALSA: hda - Shut up pins at power-saving mode with Conexnat codecs"
[linux-2.6/linux-acpi-2.6/ibm-acpi-2.6.git] / lib / scatterlist.c
blob7874b01e816e815466ccee3a08387b075503c72f
/*
 * Copyright (C) 2007 Jens Axboe <jens.axboe@oracle.com>
 *
 * Scatterlist handling helpers.
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2. See the file COPYING for more details.
 */
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/scatterlist.h>
#include <linux/highmem.h>
#include <linux/kmemleak.h>

/**
 * sg_next - return the next scatterlist entry in a list
 * @sg:		The current sg entry
 *
 * Description:
 *   Usually the next entry will be @sg@ + 1, but if this sg element is part
 *   of a chained scatterlist, it could jump to the start of a new
 *   scatterlist array.
 **/
struct scatterlist *sg_next(struct scatterlist *sg)
{
#ifdef CONFIG_DEBUG_SG
	BUG_ON(sg->sg_magic != SG_MAGIC);
#endif
	if (sg_is_last(sg))
		return NULL;

	sg++;
	if (unlikely(sg_is_chain(sg)))
		sg = sg_chain_ptr(sg);

	return sg;
}
EXPORT_SYMBOL(sg_next);

/**
 * sg_nents - return total count of entries in scatterlist
 * @sg:		The scatterlist
 *
 * Description:
 *   Returns the total number of entries in @sg, taking chaining
 *   into account.
 **/
int sg_nents(struct scatterlist *sg)
{
	int nents;
	for (nents = 0; sg; sg = sg_next(sg))
		nents++;
	return nents;
}
EXPORT_SYMBOL(sg_nents);

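/*
 * Illustrative sketch, not part of the original file: walking a possibly
 * chained list with sg_next() to sum the bytes it covers. The helper name
 * sg_example_total_len() is hypothetical; the point is that sg_next()
 * transparently follows chain entries, so the same loop works for flat
 * and chained lists alike.
 */
static unsigned int __maybe_unused sg_example_total_len(struct scatterlist *sgl)
{
	struct scatterlist *sg;
	unsigned int total = 0;

	for (sg = sgl; sg; sg = sg_next(sg))
		total += sg->length;

	return total;
}
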
/**
 * sg_last - return the last scatterlist entry in a list
 * @sgl:	First entry in the scatterlist
 * @nents:	Number of entries in the scatterlist
 *
 * Description:
 *   Should only be used casually, it (currently) scans the entire list
 *   to get the last entry.
 *
 *   Note that the @sgl@ pointer passed in need not be the first one,
 *   the important bit is that @nents@ denotes the number of entries that
 *   exist from @sgl@.
 **/
struct scatterlist *sg_last(struct scatterlist *sgl, unsigned int nents)
{
#ifndef ARCH_HAS_SG_CHAIN
	struct scatterlist *ret = &sgl[nents - 1];
#else
	struct scatterlist *sg, *ret = NULL;
	unsigned int i;

	for_each_sg(sgl, sg, nents, i)
		ret = sg;
#endif
#ifdef CONFIG_DEBUG_SG
	BUG_ON(sgl[0].sg_magic != SG_MAGIC);
	BUG_ON(!sg_is_last(ret));
#endif
	return ret;
}
EXPORT_SYMBOL(sg_last);

/**
 * sg_init_table - Initialize SG table
 * @sgl:	The SG table
 * @nents:	Number of entries in table
 *
 * Notes:
 *   If this is part of a chained sg table, sg_mark_end() should be
 *   used only on the last table part.
 **/
void sg_init_table(struct scatterlist *sgl, unsigned int nents)
{
	memset(sgl, 0, sizeof(*sgl) * nents);
#ifdef CONFIG_DEBUG_SG
	{
		unsigned int i;
		for (i = 0; i < nents; i++)
			sgl[i].sg_magic = SG_MAGIC;
	}
#endif
	sg_mark_end(&sgl[nents - 1]);
}
EXPORT_SYMBOL(sg_init_table);

/**
 * sg_init_one - Initialize a single entry sg list
 * @sg:		SG entry
 * @buf:	Virtual address for IO
 * @buflen:	IO length
 **/
void sg_init_one(struct scatterlist *sg, const void *buf, unsigned int buflen)
{
	sg_init_table(sg, 1);
	sg_set_buf(sg, buf, buflen);
}
EXPORT_SYMBOL(sg_init_one);

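/*
 * Illustrative sketch (an assumption, not in the original file): the common
 * pattern of wrapping a kmalloc'ed buffer in a one-entry list before handing
 * it to an API that consumes scatterlists. sg_set_buf() resolves the page
 * with virt_to_page(), so the buffer must not live on the stack or in
 * vmalloc space.
 */
static void __maybe_unused sg_example_init_one(void)
{
	struct scatterlist sg;
	void *buf = kmalloc(512, GFP_KERNEL);

	if (!buf)
		return;

	/* One call zeroes the entry, fills it and marks it as the end. */
	sg_init_one(&sg, buf, 512);

	/* ... hand &sg to DMA or crypto code here ... */

	kfree(buf);
}
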
/*
 * The default behaviour of sg_alloc_table() is to use these kmalloc/kfree
 * helpers.
 */
static struct scatterlist *sg_kmalloc(unsigned int nents, gfp_t gfp_mask)
{
	if (nents == SG_MAX_SINGLE_ALLOC) {
		/*
		 * Kmemleak doesn't track page allocations as they are not
		 * commonly used (in a raw form) for kernel data structures.
		 * As we chain together a list of pages and then a normal
		 * kmalloc (tracked by kmemleak), in order for that last
		 * allocation not to become decoupled (and thus a
		 * false-positive) we need to inform kmemleak of all the
		 * intermediate allocations.
		 */
		void *ptr = (void *) __get_free_page(gfp_mask);
		kmemleak_alloc(ptr, PAGE_SIZE, 1, gfp_mask);
		return ptr;
	} else
		return kmalloc(nents * sizeof(struct scatterlist), gfp_mask);
}

static void sg_kfree(struct scatterlist *sg, unsigned int nents)
{
	if (nents == SG_MAX_SINGLE_ALLOC) {
		kmemleak_free(sg);
		free_page((unsigned long) sg);
	} else
		kfree(sg);
}

/**
 * __sg_free_table - Free a previously mapped sg table
 * @table:	The sg table header to use
 * @max_ents:	The maximum number of entries per single scatterlist
 * @free_fn:	Free function
 *
 * Description:
 *   Free an sg table previously allocated and set up with
 *   __sg_alloc_table(). The @max_ents value must be identical to
 *   that previously used with __sg_alloc_table().
 **/
void __sg_free_table(struct sg_table *table, unsigned int max_ents,
		     sg_free_fn *free_fn)
{
	struct scatterlist *sgl, *next;

	if (unlikely(!table->sgl))
		return;

	sgl = table->sgl;
	while (table->orig_nents) {
		unsigned int alloc_size = table->orig_nents;
		unsigned int sg_size;

		/*
		 * If we have more than max_ents segments left,
		 * then assign 'next' to the sg table after the current one.
		 * sg_size is then one less than alloc size, since the last
		 * element is the chain pointer.
		 */
		if (alloc_size > max_ents) {
			next = sg_chain_ptr(&sgl[max_ents - 1]);
			alloc_size = max_ents;
			sg_size = alloc_size - 1;
		} else {
			sg_size = alloc_size;
			next = NULL;
		}

		table->orig_nents -= sg_size;
		free_fn(sgl, alloc_size);
		sgl = next;
	}

	table->sgl = NULL;
}
EXPORT_SYMBOL(__sg_free_table);

/**
 * sg_free_table - Free a previously allocated sg table
 * @table:	The mapped sg table header
 **/
void sg_free_table(struct sg_table *table)
{
	__sg_free_table(table, SG_MAX_SINGLE_ALLOC, sg_kfree);
}
EXPORT_SYMBOL(sg_free_table);

/**
 * __sg_alloc_table - Allocate and initialize an sg table with given allocator
 * @table:	The sg table header to use
 * @nents:	Number of entries in sg list
 * @max_ents:	The maximum number of entries the allocator returns per call
 * @gfp_mask:	GFP allocation mask
 * @alloc_fn:	Allocator to use
 *
 * Description:
 *   This function allocates a @table that is @nents entries long. The
 *   allocator is defined to return scatterlist chunks of maximum size
 *   @max_ents. Thus if @nents is bigger than @max_ents, the scatterlists
 *   will be chained in units of @max_ents.
 *
 * Notes:
 *   If this function returns non-0 (i.e. failure), the caller must call
 *   __sg_free_table() to clean up any leftover allocations.
 **/
int __sg_alloc_table(struct sg_table *table, unsigned int nents,
		     unsigned int max_ents, gfp_t gfp_mask,
		     sg_alloc_fn *alloc_fn)
{
	struct scatterlist *sg, *prv;
	unsigned int left;

#ifndef ARCH_HAS_SG_CHAIN
	if (WARN_ON_ONCE(nents > max_ents))
		return -EINVAL;
#endif

	memset(table, 0, sizeof(*table));

	left = nents;
	prv = NULL;
	do {
		unsigned int sg_size, alloc_size = left;

		if (alloc_size > max_ents) {
			alloc_size = max_ents;
			sg_size = alloc_size - 1;
		} else
			sg_size = alloc_size;

		left -= sg_size;

		sg = alloc_fn(alloc_size, gfp_mask);
		if (unlikely(!sg)) {
			/*
			 * Adjust entry count to reflect that the last
			 * entry of the previous table won't be used for
			 * linkage. Without this, sg_kfree() may get
			 * confused.
			 */
			if (prv)
				table->nents = ++table->orig_nents;

			return -ENOMEM;
		}

		sg_init_table(sg, alloc_size);
		table->nents = table->orig_nents += sg_size;

		/*
		 * If this is the first mapping, assign the sg table header.
		 * If this is not the first mapping, chain previous part.
		 */
		if (prv)
			sg_chain(prv, max_ents, sg);
		else
			table->sgl = sg;

		/*
		 * If no more entries after this one, mark the end
		 */
		if (!left)
			sg_mark_end(&sg[sg_size - 1]);

		prv = sg;
	} while (left);

	return 0;
}
EXPORT_SYMBOL(__sg_alloc_table);

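/*
 * Illustrative sketch (assumption, not in the original file): how a caller
 * with its own chunk allocator plugs into __sg_alloc_table() and
 * __sg_free_table(). The kmalloc-backed pair below simply mirrors the
 * default sg_kmalloc()/sg_kfree() helpers; the calling convention, and the
 * cleanup-on-failure rule from the Notes above, are the point.
 */
static struct scatterlist *sg_example_alloc(unsigned int nents, gfp_t gfp_mask)
{
	return kmalloc(nents * sizeof(struct scatterlist), gfp_mask);
}

static void sg_example_free(struct scatterlist *sg, unsigned int nents)
{
	kfree(sg);
}

static int __maybe_unused sg_example_alloc_table(struct sg_table *table,
						 unsigned int nents)
{
	int ret;

	ret = __sg_alloc_table(table, nents, SG_MAX_SINGLE_ALLOC,
			       GFP_KERNEL, sg_example_alloc);
	if (ret)
		/* non-0 return: the caller must free any partial allocation */
		__sg_free_table(table, SG_MAX_SINGLE_ALLOC, sg_example_free);

	return ret;
}
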
/**
 * sg_alloc_table - Allocate and initialize an sg table
 * @table:	The sg table header to use
 * @nents:	Number of entries in sg list
 * @gfp_mask:	GFP allocation mask
 *
 * Description:
 *   Allocate and initialize an sg table. If @nents@ is larger than
 *   SG_MAX_SINGLE_ALLOC a chained sg table will be set up.
 **/
int sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask)
{
	int ret;

	ret = __sg_alloc_table(table, nents, SG_MAX_SINGLE_ALLOC,
			       gfp_mask, sg_kmalloc);
	if (unlikely(ret))
		__sg_free_table(table, SG_MAX_SINGLE_ALLOC, sg_kfree);

	return ret;
}
EXPORT_SYMBOL(sg_alloc_table);

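/*
 * Illustrative sketch (assumption, not in the original file): the usual
 * sg_alloc_table() life cycle. The allocator only sizes and links the
 * table; every entry still has to be populated, here with one full page
 * each, before the list is handed to DMA mapping code.
 */
static int __maybe_unused sg_example_table(struct page **pages,
					   unsigned int n_pages)
{
	struct sg_table table;
	struct scatterlist *sg;
	unsigned int i;
	int ret;

	ret = sg_alloc_table(&table, n_pages, GFP_KERNEL);
	if (ret)
		return ret;

	for_each_sg(table.sgl, sg, table.nents, i)
		sg_set_page(sg, pages[i], PAGE_SIZE, 0);

	/* ... dma_map_sg(), perform I/O, dma_unmap_sg() ... */

	sg_free_table(&table);
	return 0;
}
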
/**
 * sg_alloc_table_from_pages - Allocate and initialize an sg table from
 *			       an array of pages
 * @sgt:	The sg table header to use
 * @pages:	Pointer to an array of page pointers
 * @n_pages:	Number of pages in the pages array
 * @offset:	Offset from start of the first page to the start of a buffer
 * @size:	Number of valid bytes in the buffer (after offset)
 * @gfp_mask:	GFP allocation mask
 *
 * Description:
 *   Allocate and initialize an sg table from a list of pages. Contiguous
 *   ranges of the pages are squashed into a single scatterlist node. A user
 *   may provide an offset at the start and a size of valid data in a buffer
 *   specified by the page array. The returned sg table is released by
 *   sg_free_table().
 *
 * Returns:
 *   0 on success, negative error on failure
 */
int sg_alloc_table_from_pages(struct sg_table *sgt,
			      struct page **pages, unsigned int n_pages,
			      unsigned long offset, unsigned long size,
			      gfp_t gfp_mask)
{
	unsigned int chunks;
	unsigned int i;
	unsigned int cur_page;
	int ret;
	struct scatterlist *s;

	/* compute number of contiguous chunks */
	chunks = 1;
	for (i = 1; i < n_pages; ++i)
		if (page_to_pfn(pages[i]) != page_to_pfn(pages[i - 1]) + 1)
			++chunks;

	ret = sg_alloc_table(sgt, chunks, gfp_mask);
	if (unlikely(ret))
		return ret;

	/* merging chunks and putting them into the scatterlist */
	cur_page = 0;
	for_each_sg(sgt->sgl, s, sgt->orig_nents, i) {
		unsigned long chunk_size;
		unsigned int j;

		/* look for the end of the current chunk */
		for (j = cur_page + 1; j < n_pages; ++j)
			if (page_to_pfn(pages[j]) !=
			    page_to_pfn(pages[j - 1]) + 1)
				break;

		chunk_size = ((j - cur_page) << PAGE_SHIFT) - offset;
		sg_set_page(s, pages[cur_page], min(size, chunk_size), offset);
		size -= chunk_size;
		offset = 0;
		cur_page = j;
	}

	return 0;
}
EXPORT_SYMBOL(sg_alloc_table_from_pages);

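/*
 * Illustrative sketch (assumption, not in the original file): building a
 * table for a buffer that starts part-way into the first of a set of
 * pinned pages. Physically contiguous neighbours are merged, so the
 * resulting table may have far fewer than n_pages entries.
 */
static int __maybe_unused sg_example_from_pages(struct sg_table *sgt,
						struct page **pages,
						unsigned int n_pages,
						unsigned long offset)
{
	unsigned long size = (unsigned long)n_pages * PAGE_SIZE - offset;

	return sg_alloc_table_from_pages(sgt, pages, n_pages, offset, size,
					 GFP_KERNEL);
}
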
/**
 * sg_miter_start - start mapping iteration over a sg list
 * @miter: sg mapping iter to be started
 * @sgl: sg list to iterate over
 * @nents: number of sg entries
 * @flags: sg iterator flags (SG_MITER_TO_SG, SG_MITER_FROM_SG, SG_MITER_ATOMIC)
 *
 * Description:
 *   Starts mapping iterator @miter.
 *
 * Context:
 *   Don't care.
 */
void sg_miter_start(struct sg_mapping_iter *miter, struct scatterlist *sgl,
		    unsigned int nents, unsigned int flags)
{
	memset(miter, 0, sizeof(struct sg_mapping_iter));

	miter->__sg = sgl;
	miter->__nents = nents;
	miter->__offset = 0;
	WARN_ON(!(flags & (SG_MITER_TO_SG | SG_MITER_FROM_SG)));
	miter->__flags = flags;
}
EXPORT_SYMBOL(sg_miter_start);

/**
 * sg_miter_next - proceed mapping iterator to the next mapping
 * @miter: sg mapping iter to proceed
 *
 * Description:
 *   Proceeds @miter to the next mapping. @miter should have been started
 *   using sg_miter_start(). On successful return, @miter->page,
 *   @miter->addr and @miter->length point to the current mapping.
 *
 * Context:
 *   Preemption disabled if SG_MITER_ATOMIC. Preemption must stay disabled
 *   till @miter is stopped. May sleep if !SG_MITER_ATOMIC.
 *
 * Returns:
 *   true if @miter contains the next mapping. false if end of sg
 *   list is reached.
 */
bool sg_miter_next(struct sg_mapping_iter *miter)
{
	unsigned int off, len;

	/* check for end and drop resources from the last iteration */
	if (!miter->__nents)
		return false;

	sg_miter_stop(miter);

	/* get to the next sg if necessary. __offset is adjusted by stop */
	while (miter->__offset == miter->__sg->length) {
		if (--miter->__nents) {
			miter->__sg = sg_next(miter->__sg);
			miter->__offset = 0;
		} else
			return false;
	}

	/* map the next page */
	off = miter->__sg->offset + miter->__offset;
	len = miter->__sg->length - miter->__offset;

	miter->page = nth_page(sg_page(miter->__sg), off >> PAGE_SHIFT);
	off &= ~PAGE_MASK;
	miter->length = min_t(unsigned int, len, PAGE_SIZE - off);
	miter->consumed = miter->length;

	if (miter->__flags & SG_MITER_ATOMIC)
		miter->addr = kmap_atomic(miter->page) + off;
	else
		miter->addr = kmap(miter->page) + off;

	return true;
}
EXPORT_SYMBOL(sg_miter_next);

/**
 * sg_miter_stop - stop mapping iteration
 * @miter: sg mapping iter to be stopped
 *
 * Description:
 *   Stops mapping iterator @miter. @miter should have been started
 *   using sg_miter_start(). A stopped iteration can be resumed by
 *   calling sg_miter_next() on it. This is useful when resources (kmap)
 *   need to be released during iteration.
 *
 * Context:
 *   Preemption disabled if SG_MITER_ATOMIC is set. Don't care
 *   otherwise.
 */
void sg_miter_stop(struct sg_mapping_iter *miter)
{
	WARN_ON(miter->consumed > miter->length);

	/* drop resources from the last iteration */
	if (miter->addr) {
		miter->__offset += miter->consumed;

		if (miter->__flags & SG_MITER_TO_SG)
			flush_kernel_dcache_page(miter->page);

		if (miter->__flags & SG_MITER_ATOMIC) {
			WARN_ON_ONCE(preemptible());
			kunmap_atomic(miter->addr);
		} else
			kunmap(miter->page);

		miter->page = NULL;
		miter->addr = NULL;
		miter->length = 0;
		miter->consumed = 0;
	}
}
EXPORT_SYMBOL(sg_miter_stop);

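/*
 * Illustrative sketch (assumption, not in the original file): the canonical
 * iterator loop, here zeroing every byte an sg list covers. SG_MITER_ATOMIC
 * maps pages with kmap_atomic(), so the loop body must not sleep; the
 * trailing sg_miter_stop() is a no-op if the loop already ran to
 * completion, but is required when breaking out early.
 */
static void __maybe_unused sg_example_zero(struct scatterlist *sgl,
					   unsigned int nents)
{
	struct sg_mapping_iter miter;

	sg_miter_start(&miter, sgl, nents, SG_MITER_TO_SG | SG_MITER_ATOMIC);

	while (sg_miter_next(&miter))
		memset(miter.addr, 0, miter.length);

	sg_miter_stop(&miter);
}
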
/**
 * sg_copy_buffer - Copy data between a linear buffer and an SG list
 * @sgl:	The SG list
 * @nents:	Number of SG entries
 * @buf:	Where to copy from
 * @buflen:	The number of bytes to copy
 * @to_buffer:	transfer direction (non-zero == from an sg list to a
 *		buffer, 0 == from a buffer to an sg list)
 *
 * Returns the number of copied bytes.
 **/
static size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents,
			     void *buf, size_t buflen, int to_buffer)
{
	unsigned int offset = 0;
	struct sg_mapping_iter miter;
	unsigned long flags;
	unsigned int sg_flags = SG_MITER_ATOMIC;

	if (to_buffer)
		sg_flags |= SG_MITER_FROM_SG;
	else
		sg_flags |= SG_MITER_TO_SG;

	sg_miter_start(&miter, sgl, nents, sg_flags);

	local_irq_save(flags);

	while (sg_miter_next(&miter) && offset < buflen) {
		unsigned int len;

		len = min(miter.length, buflen - offset);

		if (to_buffer)
			memcpy(buf + offset, miter.addr, len);
		else
			memcpy(miter.addr, buf + offset, len);

		offset += len;
	}

	sg_miter_stop(&miter);

	local_irq_restore(flags);
	return offset;
}

/**
 * sg_copy_from_buffer - Copy from a linear buffer to an SG list
 * @sgl:	The SG list
 * @nents:	Number of SG entries
 * @buf:	Where to copy from
 * @buflen:	The number of bytes to copy
 *
 * Returns the number of copied bytes.
 **/
size_t sg_copy_from_buffer(struct scatterlist *sgl, unsigned int nents,
			   void *buf, size_t buflen)
{
	return sg_copy_buffer(sgl, nents, buf, buflen, 0);
}
EXPORT_SYMBOL(sg_copy_from_buffer);

/**
 * sg_copy_to_buffer - Copy from an SG list to a linear buffer
 * @sgl:	The SG list
 * @nents:	Number of SG entries
 * @buf:	Where to copy to
 * @buflen:	The number of bytes to copy
 *
 * Returns the number of copied bytes.
 **/
size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents,
			 void *buf, size_t buflen)
{
	return sg_copy_buffer(sgl, nents, buf, buflen, 1);
}
EXPORT_SYMBOL(sg_copy_to_buffer);
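
/*
 * Illustrative sketch (assumption, not in the original file): round-tripping
 * a small header through an sg list with the two copy helpers. Both return
 * the number of bytes actually copied, which is short when the list covers
 * less than buflen bytes, so the return values are worth checking.
 */
static bool __maybe_unused sg_example_copy(struct scatterlist *sgl,
					   unsigned int nents)
{
	char hdr[64] = { 0x5a };
	char check[64] = { 0 };

	if (sg_copy_from_buffer(sgl, nents, hdr, sizeof(hdr)) != sizeof(hdr))
		return false;

	if (sg_copy_to_buffer(sgl, nents, check, sizeof(check)) != sizeof(check))
		return false;

	return memcmp(hdr, check, sizeof(check)) == 0;
}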