linux/scatterlist.h: Add __sg_page_iter_next()
/*-
 * Copyright (c) 2010 Isilon Systems, Inc.
 * Copyright (c) 2010 iX Systems, Inc.
 * Copyright (c) 2010 Panasas, Inc.
 * Copyright (c) 2013, 2014 Mellanox Technologies, Ltd.
 * Copyright (c) 2015 Matthew Dillon <dillon@backplane.com>
 * Copyright (c) 2016 Matt Macy <mmacy@nextbsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#ifndef	_LINUX_SCATTERLIST_H_
#define	_LINUX_SCATTERLIST_H_

#include <linux/string.h>
#include <linux/types.h>
#include <linux/bug.h>
#include <linux/mm.h>
#include <asm/io.h>
/*
 * SG table design.
 *
 * If flags bit 0 is set, then the sg field contains a pointer to the next sg
 * table list.  Otherwise the next entry is at sg + 1; this can be determined
 * using the sg_is_chain() function.
 *
 * If flags bit 1 is set, then this sg entry is the last element in a list;
 * this can be determined using the sg_is_last() function.
 *
 * See sg_next().
 */
struct scatterlist {
	union {
		struct vm_page *page;
		struct scatterlist *sg;
	} sl_un;
	unsigned long offset;
	uint32_t length;
	dma_addr_t dma_address;
	uint32_t flags;
};
struct sg_table {
	struct scatterlist *sgl;	/* the list */
	unsigned int nents;		/* number of mapped entries */
	unsigned int orig_nents;	/* original size of list */
};
struct sg_page_iter {
	struct scatterlist *sg;
	unsigned int sg_pgoffset;	/* page index */
	unsigned int maxents;
	unsigned int __nents;
	int __pg_advance;
};
/*
 * Maximum number of entries that will be allocated in one piece; if a
 * list larger than this is required, chaining will be utilized.
 */
#define	SG_MAX_SINGLE_ALLOC	(PAGE_SIZE / sizeof(struct scatterlist))
#define	sg_dma_address(sg)	(sg)->dma_address
#define	sg_dma_len(sg)		(sg)->length
#define	sg_page(sg)		(sg)->sl_un.page
#define	sg_scatternext(sg)	(sg)->sl_un.sg

#define	SG_END		0x01
#define	SG_CHAIN	0x02
static inline void
sg_set_page(struct scatterlist *sg, struct vm_page *page, unsigned int len,
    unsigned int offset)
{
	sg_page(sg) = page;
	sg_dma_len(sg) = len;
	sg->offset = offset;
	if (offset > PAGE_SIZE)
		panic("sg_set_page: Invalid offset %d\n", offset);
}
#if 0
static inline void
sg_set_buf(struct scatterlist *sg, const void *buf, unsigned int buflen)
{
	sg_set_page(sg, virt_to_page(buf), buflen,
	    ((uintptr_t)buf) & ~PAGE_MASK);
}
#endif
static inline void
sg_init_table(struct scatterlist *sg, unsigned int nents)
{
	bzero(sg, sizeof(*sg) * nents);
	sg[nents - 1].flags = SG_END;
}
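
/*
 * Usage sketch (illustrative, not part of the original header): building
 * a small fixed-size list on the stack.  The vm_page pointers pa and pb
 * are assumed to have been obtained elsewhere.
 *
 *	struct scatterlist sgl[2];
 *
 *	sg_init_table(sgl, 2);		(zeroes both entries, marks sgl[1]
 *					 as the last one)
 *	sg_set_page(&sgl[0], pa, PAGE_SIZE, 0);
 *	sg_set_page(&sgl[1], pb, 512, 0);
 */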
static inline struct scatterlist *
sg_next(struct scatterlist *sg)
{
	if (sg->flags & SG_END)
		return (NULL);
	sg++;
	if (sg->flags & SG_CHAIN)
		sg = sg_scatternext(sg);
	return (sg);
}
static inline vm_paddr_t
sg_phys(struct scatterlist *sg)
{
	return sg_page(sg)->phys_addr + sg->offset;
}
/*
 * sg_chain - Chain two sglists together
 * @prv:	First scatterlist
 * @prv_nents:	Number of entries in prv
 * @sgl:	Second scatterlist
 *
 * Description:
 *   Links @prv@ and @sgl@ together, to form a longer scatterlist.
 */
static inline void
sg_chain(struct scatterlist *prv, unsigned int prv_nents,
    struct scatterlist *sgl)
{
	/*
	 * offset and length are unused for chain entry.  Clear them.
	 */
	struct scatterlist *sg = &prv[prv_nents - 1];

	sg->offset = 0;
	sg->length = 0;

	/*
	 * Indicate a link pointer, and set the link to the second list.
	 */
	sg->flags = SG_CHAIN;
	sg->sl_un.sg = sgl;
}
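
/*
 * Usage sketch (illustrative): chaining two fixed-size lists.  The last
 * slot of the first list is turned into the link entry, so only the
 * first seven entries of first[] can carry data.
 *
 *	struct scatterlist first[8], second[4];
 *
 *	sg_init_table(first, 8);
 *	sg_init_table(second, 4);
 *	(fill first[0..6] and second[0..3] with sg_set_page())
 *	sg_chain(first, 8, second);
 *
 * sg_next() then walks first[0..6] and follows the link in first[7]
 * transparently into second[].
 */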
/*
 * sg_mark_end - Mark the end of the scatterlist
 * @sg:		SG entry
 *
 * Description:
 *   Marks the passed in sg entry as the termination point for the sg
 *   table.  A call to sg_next() on this entry will return NULL.
 */
static inline void sg_mark_end(struct scatterlist *sg)
{
	sg->flags = SG_END;
}
/*
 * __sg_free_table - Free a previously mapped sg table
 * @table:	The sg table header to use
 * @max_ents:	The maximum number of entries per single scatterlist
 *
 * Description:
 *   Free an sg table previously allocated and setup with
 *   __sg_alloc_table().  The @max_ents value must be identical to
 *   that previously used with __sg_alloc_table().
 */
static inline void
__sg_free_table(struct sg_table *table, unsigned int max_ents)
{
	struct scatterlist *sgl, *next;

	if (unlikely(!table->sgl))
		return;

	sgl = table->sgl;
	while (table->orig_nents) {
		unsigned int alloc_size = table->orig_nents;
		unsigned int sg_size;

		/*
		 * If we have more than max_ents segments left,
		 * then assign 'next' to the sg table after the current one.
		 * sg_size is then one less than alloc size, since the last
		 * element is the chain pointer.
		 */
		if (alloc_size > max_ents) {
			next = sgl[max_ents - 1].sl_un.sg;
			alloc_size = max_ents;
			sg_size = alloc_size - 1;
		} else {
			sg_size = alloc_size;
			next = NULL;
		}

		table->orig_nents -= sg_size;
		kfree(sgl);
		sgl = next;
	}

	table->sgl = NULL;
}
/*
 * sg_free_table - Free a previously allocated sg table
 * @table:	The mapped sg table header
 */
static inline void
sg_free_table(struct sg_table *table)
{
	__sg_free_table(table, SG_MAX_SINGLE_ALLOC);
}
/*
 * __sg_alloc_table - Allocate and initialize an sg table with given allocator
 * @table:	The sg table header to use
 * @nents:	Number of entries in sg list
 * @max_ents:	The maximum number of entries the allocator returns per call
 * @gfp_mask:	GFP allocation mask
 *
 * Description:
 *   This function returns a @table @nents long.  The allocator is
 *   defined to return scatterlist chunks of maximum size @max_ents.
 *   Thus if @nents is bigger than @max_ents, the scatterlists will be
 *   chained in units of @max_ents.
 *
 * Notes:
 *   If this function returns non-0 (i.e. failure), the caller must call
 *   __sg_free_table() to clean up any leftover allocations.
 */
static inline int
__sg_alloc_table(struct sg_table *table, unsigned int nents,
    unsigned int max_ents, gfp_t gfp_mask)
{
	struct scatterlist *sg, *prv;
	unsigned int left;

	memset(table, 0, sizeof(*table));

	if (nents == 0)
		return -EINVAL;
	left = nents;
	prv = NULL;
	do {
		unsigned int sg_size, alloc_size = left;

		if (alloc_size > max_ents) {
			alloc_size = max_ents;
			sg_size = alloc_size - 1;
		} else
			sg_size = alloc_size;

		left -= sg_size;

		sg = kmalloc(alloc_size * sizeof(struct scatterlist), M_DRM, gfp_mask);
		if (unlikely(!sg)) {
			/*
			 * Adjust entry count to reflect that the last
			 * entry of the previous table won't be used for
			 * linkage.  Without this, sg_kfree() may get
			 * confused.
			 */
			if (prv)
				table->nents = ++table->orig_nents;

			return -ENOMEM;
		}

		sg_init_table(sg, alloc_size);
		table->nents = table->orig_nents += sg_size;

		/*
		 * If this is the first mapping, assign the sg table header.
		 * If this is not the first mapping, chain previous part.
		 */
		if (prv)
			sg_chain(prv, max_ents, sg);
		else
			table->sgl = sg;

		/*
		 * If no more entries after this one, mark the end.
		 */
		if (!left)
			sg_mark_end(&sg[sg_size - 1]);

		prv = sg;
	} while (left);

	return 0;
}
/*
 * sg_alloc_table - Allocate and initialize an sg table
 * @table:	The sg table header to use
 * @nents:	Number of entries in sg list
 * @gfp_mask:	GFP allocation mask
 *
 * Description:
 *   Allocate and initialize an sg table.  If @nents@ is larger than
 *   SG_MAX_SINGLE_ALLOC a chained sg table will be set up.
 */
static inline int
sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask)
{
	int ret;

	ret = __sg_alloc_table(table, nents, SG_MAX_SINGLE_ALLOC,
	    gfp_mask);
	if (unlikely(ret))
		__sg_free_table(table, SG_MAX_SINGLE_ALLOC);

	return ret;
}
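
/*
 * Usage sketch (illustrative): allocating a table for npages entries and
 * filling it from a hypothetical array of vm_page pointers, pages[].
 * GFP_KERNEL is assumed to be provided by the surrounding linux
 * compatibility headers.
 *
 *	struct sg_table st;
 *	struct scatterlist *sg;
 *	int i, error;
 *
 *	error = sg_alloc_table(&st, npages, GFP_KERNEL);
 *	if (error)
 *		return (error);
 *	for_each_sg(st.sgl, sg, npages, i)
 *		sg_set_page(sg, pages[i], PAGE_SIZE, 0);
 *	(... use the table ...)
 *	sg_free_table(&st);
 */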
static inline int
sg_nents(struct scatterlist *sg)
{
	int nents;

	for (nents = 0; sg; sg = sg_next(sg))
		nents++;
	return nents;
}
static inline void
__sg_page_iter_start(struct sg_page_iter *piter,
    struct scatterlist *sglist, unsigned int nents,
    unsigned long pgoffset)
{
	piter->__pg_advance = 0;
	piter->__nents = nents;

	piter->sg = sglist;
	piter->sg_pgoffset = pgoffset;
}
/*
 * Iterate pages in sg list.
 */
static inline void
_sg_iter_next(struct sg_page_iter *iter)
{
	struct scatterlist *sg;
	unsigned int pgcount;

	sg = iter->sg;
	pgcount = (sg->offset + sg->length + PAGE_MASK) >> PAGE_SHIFT;

	++iter->sg_pgoffset;
	while (iter->sg_pgoffset >= pgcount) {
		iter->sg_pgoffset -= pgcount;
		sg = sg_next(sg);
		--iter->maxents;
		if (sg == NULL || iter->maxents == 0)
			break;
		pgcount = (sg->offset + sg->length + PAGE_MASK) >> PAGE_SHIFT;
	}
	iter->sg = sg;
}
static inline int
sg_page_count(struct scatterlist *sg)
{
	return PAGE_ALIGN(sg->offset + sg->length) >> PAGE_SHIFT;
}
static inline bool
__sg_page_iter_next(struct sg_page_iter *piter)
{
	if (piter->__nents == 0)
		return (false);
	if (piter->sg == NULL)
		return (false);

	piter->sg_pgoffset += piter->__pg_advance;
	piter->__pg_advance = 1;

	while (piter->sg_pgoffset >= sg_page_count(piter->sg)) {
		piter->sg_pgoffset -= sg_page_count(piter->sg);
		piter->sg = sg_next(piter->sg);
		if (--piter->__nents == 0)
			return (false);
		if (piter->sg == NULL)
			return (false);
	}

	return (true);
}
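
/*
 * Usage sketch (illustrative): the Linux-style page iteration that
 * __sg_page_iter_next() enables, visiting every page backing a
 * previously filled sg_table st.
 *
 *	struct sg_page_iter piter;
 *
 *	__sg_page_iter_start(&piter, st.sgl, st.nents, 0);
 *	while (__sg_page_iter_next(&piter)) {
 *		struct vm_page *p = sg_page_iter_page(&piter);
 *		(... use p ...)
 *	}
 */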
/*
 * NOTE: pgoffset is really a page index, not a byte offset.
 */
static inline void
_sg_iter_init(struct scatterlist *sgl, struct sg_page_iter *iter,
    unsigned int nents, unsigned long pgoffset)
{
	if (nents) {
		/*
		 * Nominal case.  Note subtract 1 from starting page index
		 * for initial _sg_iter_next() call.
		 */
		iter->sg = sgl;
		iter->sg_pgoffset = pgoffset - 1;
		iter->maxents = nents;
		_sg_iter_next(iter);
	} else {
		/*
		 * Degenerate case.
		 */
		iter->sg = NULL;
		iter->sg_pgoffset = 0;
		iter->maxents = 0;
	}
}
static inline struct vm_page *
sg_page_iter_page(struct sg_page_iter *piter)
{
	return nth_page(sg_page(piter->sg), piter->sg_pgoffset);
}
static inline dma_addr_t
sg_page_iter_dma_address(struct sg_page_iter *spi)
{
	return spi->sg->dma_address + (spi->sg_pgoffset << PAGE_SHIFT);
}
#define	for_each_sg_page(sgl, iter, nents, pgoffset)			\
	for (_sg_iter_init(sgl, iter, nents, pgoffset);			\
	     (iter)->sg; _sg_iter_next(iter))

#define	for_each_sg(sglist, sg, sgmax, _itr)				\
	for (_itr = 0, sg = (sglist); _itr < (sgmax); _itr++, sg = sg_next(sg))
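
/*
 * Usage sketch (illustrative): the two iteration macros above.  Note that
 * for_each_sg_page() is driven by _sg_iter_init()/_sg_iter_next(), not by
 * __sg_page_iter_next().  The table pointer tbl is hypothetical.
 *
 *	struct sg_page_iter iter;
 *	struct scatterlist *sg;
 *	int i;
 *
 *	for_each_sg(tbl->sgl, sg, tbl->nents, i)
 *		kprintf("entry %d: %u bytes\n", i, sg_dma_len(sg));
 *
 *	for_each_sg_page(tbl->sgl, &iter, tbl->nents, 0)
 *		(sg_page_iter_page(&iter) yields each backing page)
 */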
#endif	/* _LINUX_SCATTERLIST_H_ */