drm/i915/gem: Switch to the Linux scatterlist API
[dragonfly.git] / sys / dev / drm / include / linux / scatterlist.h
blob26204dc0ccc744f335a790bfc0098fe2b2f4c9ed
1 /*-
2 * Copyright (c) 2010 Isilon Systems, Inc.
3 * Copyright (c) 2010 iX Systems, Inc.
4 * Copyright (c) 2010 Panasas, Inc.
5 * Copyright (c) 2013, 2014 Mellanox Technologies, Ltd.
6 * Copyright (c) 2015 Matthew Dillon <dillon@backplane.com>
7 * All rights reserved.
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 * notice unmodified, this list of conditions, and the following
14 * disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
19 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
20 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
21 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
22 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
23 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
24 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
25 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
26 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
28 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 #ifndef _LINUX_SCATTERLIST_H_
32 #define _LINUX_SCATTERLIST_H_
34 #include <linux/string.h>
35 #include <linux/types.h>
36 #include <linux/bug.h>
37 #include <linux/mm.h>
38 #include <asm/io.h>
41 * SG table design.
43 * If flags bit 0 is set, then the sg field contains a pointer to the next sg
44 * table list. Otherwise the next entry is at sg + 1, can be determined using
45 * the sg_is_chain() function.
47 * If flags bit 1 is set, then this sg entry is the last element in a list,
48 * can be determined using the sg_is_last() function.
50 * See sg_next().
/*
 * A single scatter/gather segment.
 *
 * The union holds either the backing page (normal entry) or a pointer to
 * the next scatterlist chunk (chain entry, distinguished via the flags
 * field — see SG_CHAIN below).
 */
struct scatterlist {
	union {
		struct vm_page *page;	/* backing page (normal entry) */
		struct scatterlist *sg;	/* next chunk (chain entry) */
	} sl_un;
	unsigned long offset;		/* byte offset into the page */
	uint32_t length;		/* segment length in bytes */
	dma_addr_t dma_address;		/* bus address after DMA mapping */
	uint32_t flags;			/* SG_END and/or SG_CHAIN */
};
/*
 * Header for a (possibly chained) scatter/gather table.
 */
struct sg_table {
	struct scatterlist *sgl;	/* the list */
	unsigned int nents;		/* number of mapped entries */
	unsigned int orig_nents;	/* original size of list */
};
/*
 * Cursor for page-at-a-time iteration over a scatterlist;
 * used by for_each_sg_page() via _sg_iter_init()/_sg_iter_next().
 */
struct sg_page_iter {
	struct scatterlist *sg;		/* current sg entry */
	unsigned int sg_pgoffset;	/* page index within current entry */
	unsigned int maxents;		/* sg entries left to visit */
};
/*
 * Maximum number of entries that will be allocated in one piece, if
 * a list larger than this is required then chaining will be utilized.
 */
#define SG_MAX_SINGLE_ALLOC	(PAGE_SIZE / sizeof(struct scatterlist))

/* Field accessors for a scatterlist entry. */
#define sg_dma_address(sg)	(sg)->dma_address
#define sg_dma_len(sg)		(sg)->length
#define sg_page(sg)		(sg)->sl_un.page
#define sg_scatternext(sg)	(sg)->sl_un.sg

/* flags bits: entry terminates the list / entry is a chain pointer. */
#define SG_END		0x01
#define SG_CHAIN	0x02
92 static inline void
93 sg_set_page(struct scatterlist *sg, struct vm_page *page, unsigned int len,
94 unsigned int offset)
96 sg_page(sg) = page;
97 sg_dma_len(sg) = len;
98 sg->offset = offset;
99 if (offset > PAGE_SIZE)
100 panic("sg_set_page: Invalid offset %d\n", offset);
#if 0
/*
 * sg_set_buf - Point an sg entry at a kernel-virtual buffer.
 *
 * Compiled out; kept for parity with the Linux API.
 *
 * NOTE(review): `~PAGE_MASK` is the Linux idiom (where PAGE_MASK masks the
 * high bits); elsewhere in this file (_sg_iter_next) PAGE_MASK is used as a
 * low-bits mask per BSD convention, which would make this `& PAGE_MASK` —
 * verify before enabling.
 */
static inline void
sg_set_buf(struct scatterlist *sg, const void *buf, unsigned int buflen)
{
	sg_set_page(sg, virt_to_page(buf), buflen,
	    ((uintptr_t)buf) & ~PAGE_MASK);
}
#endif
112 static inline void
113 sg_init_table(struct scatterlist *sg, unsigned int nents)
115 bzero(sg, sizeof(*sg) * nents);
116 sg[nents - 1].flags = SG_END;
119 static inline struct scatterlist *
120 sg_next(struct scatterlist *sg)
122 if (sg->flags & SG_END)
123 return (NULL);
124 sg++;
125 if (sg->flags & SG_CHAIN)
126 sg = sg_scatternext(sg);
127 return (sg);
130 static inline vm_paddr_t
131 sg_phys(struct scatterlist *sg)
133 return sg_page(sg)->phys_addr + sg->offset;
137 * sg_chain - Chain two sglists together
138 * @prv: First scatterlist
139 * @prv_nents: Number of entries in prv
140 * @sgl: Second scatterlist
142 * Description:
143 * Links @prv@ and @sgl@ together, to form a longer scatterlist.
146 static inline void
147 sg_chain(struct scatterlist *prv, unsigned int prv_nents,
148 struct scatterlist *sgl)
151 * offset and length are unused for chain entry. Clear them.
153 struct scatterlist *sg = &prv[prv_nents - 1];
155 sg->offset = 0;
156 sg->length = 0;
159 * Indicate a link pointer, and set the link to the second list.
161 sg->flags = SG_CHAIN;
162 sg->sl_un.sg = sgl;
166 * sg_mark_end - Mark the end of the scatterlist
167 * @sg: SG entryScatterlist
169 * Description:
170 * Marks the passed in sg entry as the termination point for the sg
171 * table. A call to sg_next() on this entry will return NULL.
174 static inline void sg_mark_end(struct scatterlist *sg)
176 sg->flags = SG_END;
/**
 * __sg_free_table - Free a previously mapped sg table
 * @table:    The sg table header to use
 * @max_ents: The maximum number of entries per single scatterlist
 *
 * Description:
 *   Free an sg table previously allocated and setup with
 *   __sg_alloc_table(). The @max_ents value must be identical to
 *   that previously used with __sg_alloc_table(), since the chunk
 *   boundaries (and therefore the chain-pointer locations) are
 *   recomputed from it here.
 */
static inline void
__sg_free_table(struct sg_table *table, unsigned int max_ents)
{
	struct scatterlist *sgl, *next;

	if (unlikely(!table->sgl))
		return;

	sgl = table->sgl;
	/* orig_nents counts data entries; chain links are not included. */
	while (table->orig_nents) {
		unsigned int alloc_size = table->orig_nents;
		unsigned int sg_size;

		/*
		 * If we have more than max_ents segments left,
		 * then assign 'next' to the sg table after the current one.
		 * sg_size is then one less than alloc size, since the last
		 * element is the chain pointer.
		 */
		if (alloc_size > max_ents) {
			next = sgl[max_ents - 1].sl_un.sg;
			alloc_size = max_ents;
			sg_size = alloc_size - 1;
		} else {
			sg_size = alloc_size;
			next = NULL;
		}

		table->orig_nents -= sg_size;
		kfree(sgl);
		sgl = next;
	}

	table->sgl = NULL;
}
227 * sg_free_table - Free a previously allocated sg table
228 * @table: The mapped sg table header
231 static inline void
232 sg_free_table(struct sg_table *table)
234 __sg_free_table(table, SG_MAX_SINGLE_ALLOC);
/**
 * __sg_alloc_table - Allocate and initialize an sg table with given allocator
 * @table:    The sg table header to use
 * @nents:    Number of entries in sg list
 * @max_ents: The maximum number of entries the allocator returns per call
 * @gfp_mask: GFP allocation mask
 *
 * Description:
 *   This function returns a @table @nents long. The allocator is
 *   defined to return scatterlist chunks of maximum size @max_ents.
 *   Thus if @nents is bigger than @max_ents, the scatterlists will be
 *   chained in units of @max_ents.
 *
 * Notes:
 *   If this function returns non-0 (eg failure), the caller must call
 *   __sg_free_table() to cleanup any leftover allocations.
 */
static inline int
__sg_alloc_table(struct sg_table *table, unsigned int nents,
    unsigned int max_ents, gfp_t gfp_mask)
{
	struct scatterlist *sg, *prv;
	unsigned int left;

	memset(table, 0, sizeof(*table));

	if (nents == 0)
		return -EINVAL;
	left = nents;
	prv = NULL;
	do {
		unsigned int sg_size, alloc_size = left;

		/*
		 * A full chunk reserves its final slot for the chain link,
		 * so only max_ents - 1 of its entries hold data.
		 */
		if (alloc_size > max_ents) {
			alloc_size = max_ents;
			sg_size = alloc_size - 1;
		} else
			sg_size = alloc_size;

		left -= sg_size;

		sg = kmalloc(alloc_size * sizeof(struct scatterlist), M_DRM, gfp_mask);
		if (unlikely(!sg)) {
			/*
			 * Adjust entry count to reflect that the last
			 * entry of the previous table won't be used for
			 * linkage. Without this, sg_kfree() may get
			 * confused.
			 */
			if (prv)
				table->nents = ++table->orig_nents;

			return -ENOMEM;
		}

		sg_init_table(sg, alloc_size);
		/* orig_nents accumulates data entries only (not chain links). */
		table->nents = table->orig_nents += sg_size;

		/*
		 * If this is the first mapping, assign the sg table header.
		 * If this is not the first mapping, chain previous part.
		 */
		if (prv)
			sg_chain(prv, max_ents, sg);
		else
			table->sgl = sg;

		/*
		 * If no more entries after this one, mark the end
		 */
		if (!left)
			sg_mark_end(&sg[sg_size - 1]);

		prv = sg;
	} while (left);

	return 0;
}
318 * sg_alloc_table - Allocate and initialize an sg table
319 * @table: The sg table header to use
320 * @nents: Number of entries in sg list
321 * @gfp_mask: GFP allocation mask
323 * Description:
324 * Allocate and initialize an sg table. If @nents@ is larger than
325 * SG_MAX_SINGLE_ALLOC a chained sg table will be setup.
329 static inline int
330 sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask)
332 int ret;
334 ret = __sg_alloc_table(table, nents, SG_MAX_SINGLE_ALLOC,
335 gfp_mask);
336 if (unlikely(ret))
337 __sg_free_table(table, SG_MAX_SINGLE_ALLOC);
339 return ret;
/*
 * _sg_iter_next - Advance a page iterator by one page.
 *
 * Bumps sg_pgoffset; when it runs past the end of the current entry,
 * carries over into the following entries (an entry spanning zero or
 * few pages may be skipped entirely). iter->sg becomes NULL when the
 * list is exhausted, which terminates for_each_sg_page().
 *
 * NOTE(review): if maxents reaches 0 while sg is still non-NULL, the
 * loop breaks with iter->sg set, so iteration would continue past
 * @nents entries. Callers appear to pass an nents that matches the
 * SG_END-terminated list length — verify.
 */
static inline void
_sg_iter_next(struct sg_page_iter *iter)
{
	struct scatterlist *sg;
	unsigned int pgcount;

	sg = iter->sg;
	/* Pages spanned by this entry: offset + length, rounded up. */
	pgcount = (sg->offset + sg->length + PAGE_MASK) >> PAGE_SHIFT;

	++iter->sg_pgoffset;
	while (iter->sg_pgoffset >= pgcount) {
		iter->sg_pgoffset -= pgcount;
		sg = sg_next(sg);
		--iter->maxents;
		if (sg == NULL || iter->maxents == 0)
			break;
		pgcount = (sg->offset + sg->length + PAGE_MASK) >> PAGE_SHIFT;
	}
	iter->sg = sg;
}
/*
 * _sg_iter_init - Position a page iterator at page index @pgoffset.
 *
 * NOTE: pgoffset is really a page index, not a byte offset.
 */
static inline void
_sg_iter_init(struct scatterlist *sgl, struct sg_page_iter *iter,
    unsigned int nents, unsigned long pgoffset)
{
	if (nents) {
		/*
		 * Nominal case. Note subtract 1 from starting page index
		 * for initial _sg_iter_next() call; for pgoffset == 0 this
		 * deliberately wraps the unsigned field, and the increment
		 * in _sg_iter_next() brings it back to 0.
		 */
		iter->sg = sgl;
		iter->sg_pgoffset = pgoffset - 1;
		iter->maxents = nents;
		_sg_iter_next(iter);
	} else {
		/*
		 * Degenerate case: empty list; iteration terminates
		 * immediately because iter->sg is NULL.
		 */
		iter->sg = NULL;
		iter->sg_pgoffset = 0;
		iter->maxents = 0;
	}
}
392 static inline struct vm_page *
393 sg_page_iter_page(struct sg_page_iter *piter)
395 return nth_page(sg_page(piter->sg), piter->sg_pgoffset);
398 static inline dma_addr_t
399 sg_page_iter_dma_address(struct sg_page_iter *spi)
401 return spi->sg->dma_address + (spi->sg_pgoffset << PAGE_SHIFT);
/*
 * for_each_sg_page - iterate page-by-page over @nents entries of @sgl,
 * starting @pgoffset pages in; @iter is a struct sg_page_iter cursor.
 * Terminates when the iterator's sg pointer becomes NULL.
 */
#define for_each_sg_page(sgl, iter, nents, pgoffset)			\
	for (_sg_iter_init(sgl, iter, nents, pgoffset);			\
	     (iter)->sg; _sg_iter_next(iter))

/*
 * for_each_sg - iterate entry-by-entry over @sgmax entries of @sglist;
 * sg_next() transparently follows chain links between chunks.
 */
#define for_each_sg(sglist, sg, sgmax, _itr)				\
	for (_itr = 0, sg = (sglist); _itr < (sgmax); _itr++, sg = sg_next(sg))
411 #endif /* _LINUX_SCATTERLIST_H_ */