drm: Define struct page and use it everywhere
[dragonfly.git] / sys / dev / drm / linux_scatterlist.c
blob 92603464d7458c25129de8f31bbc6f7e7064ac0d
/*
 * Copyright (c) 2010 Isilon Systems, Inc.
 * Copyright (c) 2010 iX Systems, Inc.
 * Copyright (c) 2010 Panasas, Inc.
 * Copyright (c) 2013, 2014 Mellanox Technologies, Ltd.
 * Copyright (c) 2015 Matthew Dillon <dillon@backplane.com>
 * Copyright (c) 2016 Matt Macy <mmacy@nextbsd.org>
 * Copyright (c) 2017-2018 François Tigeot <ftigeot@wolfpond.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <linux/slab.h>
#include <linux/scatterlist.h>
#include <linux/highmem.h>
/**
 * __sg_alloc_table - Allocate and initialize an sg table with given allocator
 * @table:	The sg table header to use
 * @nents:	Number of entries in sg list
 * @max_ents:	The maximum number of entries the allocator returns per call
 * @gfp_mask:	GFP allocation mask
 *
 * Description:
 *   This function returns a @table @nents long. The allocator is
 *   defined to return scatterlist chunks of maximum size @max_ents.
 *   Thus if @nents is bigger than @max_ents, the scatterlists will be
 *   chained in units of @max_ents.
 *
 * Notes:
 *   If this function returns non-0 (eg failure), the caller must call
 *   __sg_free_table() to cleanup any leftover allocations.
 *
 **/
int
__sg_alloc_table(struct sg_table *table, unsigned int nents,
		 unsigned int max_ents, gfp_t gfp_mask)
{
	struct scatterlist *sg, *prv;
	unsigned int left;

	memset(table, 0, sizeof(*table));

	if (nents == 0)
		return -EINVAL;

	left = nents;
	prv = NULL;
	do {
		unsigned int sg_size, alloc_size = left;

		if (alloc_size > max_ents) {
			alloc_size = max_ents;
			sg_size = alloc_size - 1;
		} else
			sg_size = alloc_size;

		left -= sg_size;

		sg = kmalloc(alloc_size * sizeof(struct scatterlist), M_DRM, gfp_mask);
		if (unlikely(!sg)) {
			/*
			 * Adjust entry count to reflect that the last
			 * entry of the previous table won't be used for
			 * linkage. Without this, sg_kfree() may get
			 * confused.
			 */
			if (prv)
				table->nents = ++table->orig_nents;

			return -ENOMEM;
		}

		sg_init_table(sg, alloc_size);
		table->nents = table->orig_nents += sg_size;

		/*
		 * If this is the first mapping, assign the sg table header.
		 * If this is not the first mapping, chain previous part.
		 */
		if (prv)
			sg_chain(prv, max_ents, sg);
		else
			table->sgl = sg;

		/*
		 * If no more entries after this one, mark the end
		 */
		if (!left)
			sg_mark_end(&sg[sg_size - 1]);

		prv = sg;
	} while (left);

	return 0;
}
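
/*
 * Illustrative sketch (not part of this file): with nents = 6 and
 * max_ents = 4, the loop above allocates a first chunk of 4 entries
 * (3 payload slots plus 1 chain slot) and a second chunk of 3 payload
 * slots whose last entry is marked as the end, leaving
 * table->orig_nents = 6.  As the note above requires, a caller cleans
 * up on failure, roughly as a Linux-style sg_alloc_table() wrapper
 * would (SG_MAX_SINGLE_ALLOC is assumed here as the per-chunk limit):
 *
 *	struct sg_table st;
 *	int ret;
 *
 *	ret = __sg_alloc_table(&st, nents, SG_MAX_SINGLE_ALLOC, GFP_KERNEL);
 *	if (ret)
 *		__sg_free_table(&st, SG_MAX_SINGLE_ALLOC);
 */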

/*
 * __sg_free_table - Free a previously allocated sg table
 * @table:	The sg table header to use
 * @max_ents:	The maximum number of entries per single scatterlist
 */
void
__sg_free_table(struct sg_table *table, unsigned int max_ents)
{
	struct scatterlist *sgl, *next;

	if (unlikely(!table->sgl))
		return;

	sgl = table->sgl;
	while (table->orig_nents) {
		unsigned int alloc_size = table->orig_nents;
		unsigned int sg_size;

		/*
		 * If we have more than max_ents segments left,
		 * then assign 'next' to the sg table after the current one.
		 * sg_size is then one less than alloc size, since the last
		 * element is the chain pointer.
		 */
		if (alloc_size > max_ents) {
			next = sgl[max_ents - 1].sl_un.sg;
			alloc_size = max_ents;
			sg_size = alloc_size - 1;
		} else {
			sg_size = alloc_size;
			next = NULL;
		}

		table->orig_nents -= sg_size;
		kfree(sgl);
		sgl = next;
	}

	table->sgl = NULL;
}
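
/*
 * Illustrative sketch (not part of this file): teardown has to use the same
 * per-chunk limit that was used when the table was built, otherwise the
 * chunk boundaries are computed wrong and chained allocations can leak or
 * be mis-freed.  SG_MAX_SINGLE_ALLOC is assumed here:
 *
 *	__sg_alloc_table(&st, nents, SG_MAX_SINGLE_ALLOC, GFP_KERNEL);
 *	...
 *	__sg_free_table(&st, SG_MAX_SINGLE_ALLOC);
 */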

/*
 * sg_pcopy_from_buffer - Copy from a linear buffer to an SG list
 * @sgl:	The SG list
 * @nents:	Number of SG entries
 * @buf:	Where to copy from
 * @buflen:	The number of bytes to copy
 * @skip:	Number of bytes to skip before copying
 *
 * Returns the number of copied bytes.
 */
size_t
sg_pcopy_from_buffer(struct scatterlist *sgl, unsigned int nents,
		     const void *buf, size_t buflen, off_t skip)
{
	off_t off;
	int len, curlen, curoff;
	struct sg_page_iter iter;
	struct scatterlist *sg;
	struct page *page;
	char *vaddr;

	off = 0;
	for_each_sg_page(sgl, &iter, nents, 0) {
		sg = iter.sg;
		curlen = sg->length;
		curoff = sg->offset;
		if (skip && curlen >= skip) {
			skip -= curlen;
			continue;
		}
		if (skip) {
			curlen -= skip;
			curoff += skip;
			skip = 0;
		}
		len = min(curlen, buflen - off);
		page = sg_page_iter_page(&iter);
		vaddr = (char *)kmap(page) + sg->offset;
		memcpy(vaddr, (const char *)buf + off, len);
		off += len;
		kunmap(page);
	}

	return (off);
}
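
/*
 * Illustrative sketch (not part of this file): once a table's pages are
 * populated, a linear staging buffer can be scattered into it.  `st`,
 * `data` and `len` are hypothetical; the call starts at byte offset 0 of
 * the list and returns the number of bytes actually copied:
 *
 *	size_t copied;
 *
 *	copied = sg_pcopy_from_buffer(st.sgl, st.nents, data, len, 0);
 */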

/*
 * sg_pcopy_to_buffer - Copy from an SG list to a linear buffer
 * @sgl:	The SG list
 * @nents:	Number of SG entries
 * @buf:	Where to copy to
 * @buflen:	The number of bytes to copy
 * @skip:	Number of bytes to skip before copying
 *
 * Returns the number of copied bytes.
 */
size_t
sg_pcopy_to_buffer(struct scatterlist *sgl, unsigned int nents,
		   void *buf, size_t buflen, off_t skip)
{
	off_t off;
	int len, curlen, curoff;
	struct sg_page_iter iter;
	struct scatterlist *sg;
	struct page *page;
	char *vaddr;

	off = 0;
	for_each_sg_page(sgl, &iter, nents, 0) {
		sg = iter.sg;
		curlen = sg->length;
		curoff = sg->offset;
		if (skip && curlen >= skip) {
			skip -= curlen;
			continue;
		}
		if (skip) {
			curlen -= skip;
			curoff += skip;
			skip = 0;
		}
		len = min(curlen, buflen - off);
		page = sg_page_iter_page(&iter);
		vaddr = (char *)kmap(page) + sg->offset;
		memcpy((char *)buf + off, vaddr, len);
		off += len;
		kunmap(page);
	}

	return (off);
}
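
/*
 * Illustrative sketch (not part of this file): the mirror operation gathers
 * the scattered pages back into a linear buffer, e.g. for a bounce copy
 * before handing the data to code that cannot walk an SG list.  `st`,
 * `bounce` and `len` are hypothetical:
 *
 *	size_t copied;
 *
 *	copied = sg_pcopy_to_buffer(st.sgl, st.nents, bounce, len, 0);
 */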