/* SPDX-License-Identifier: GPL-2.0-only */

#include <commonlib/helpers.h>
#include <commonlib/region.h>
#include <stdint.h>
#include <string.h>

int region_is_subregion(const struct region *p, const struct region *c)
{
	if (region_offset(c) < region_offset(p))
		return 0;

	if (region_end(c) > region_end(p))
		return 0;

	/* Catch wraparound: an end before the offset means overflow. */
	if (region_end(c) < region_offset(c))
		return 0;

	return 1;
}

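/*
 * Usage sketch (hypothetical values): a parent region covering
 * [0x1000, 0x3000) contains a child covering [0x1800, 0x2000):
 *
 *	const struct region parent = { .offset = 0x1000, .size = 0x2000 };
 *	const struct region child  = { .offset = 0x1800, .size = 0x800 };
 *	region_is_subregion(&parent, &child);	// returns 1
 */
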
static int normalize_and_ok(const struct region *outer, struct region *inner)
{
	inner->offset += region_offset(outer);
	return region_is_subregion(outer, inner);
}

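/*
 * Normalization example (hypothetical numbers): for an outer region at
 * offset 0x1000, an inner request of { .offset = 0x200, .size = 0x100 }
 * becomes { .offset = 0x1200, .size = 0x100 } in the root's address
 * space before the containment check.
 */
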
static const struct region_device *rdev_root(const struct region_device *rdev)
{
	if (rdev->root == NULL)
		return rdev;
	return rdev->root;
}

ssize_t rdev_relative_offset(const struct region_device *p,
				const struct region_device *c)
{
	if (rdev_root(p) != rdev_root(c))
		return -1;

	if (!region_is_subregion(&p->region, &c->region))
		return -1;

	return region_device_offset(c) - region_device_offset(p);
}

void *rdev_mmap(const struct region_device *rd, size_t offset, size_t size)
{
	const struct region_device *rdev;
	struct region req = {
		.offset = offset,
		.size = size,
	};

	if (!normalize_and_ok(&rd->region, &req))
		return NULL;

	rdev = rdev_root(rd);

	if (rdev->ops->mmap == NULL)
		return NULL;

	return rdev->ops->mmap(rdev, req.offset, req.size);
}

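/*
 * Usage sketch (hypothetical device `boot_dev`): map a 256-byte window
 * at offset 0x100, use it, then release the mapping:
 *
 *	void *p = rdev_mmap(&boot_dev, 0x100, 256);
 *	if (p != NULL) {
 *		// ... read through p ...
 *		rdev_munmap(&boot_dev, p);
 *	}
 */
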
int rdev_munmap(const struct region_device *rd, void *mapping)
{
	const struct region_device *rdev;

	rdev = rdev_root(rd);

	if (rdev->ops->munmap == NULL)
		return -1;

	return rdev->ops->munmap(rdev, mapping);
}

ssize_t rdev_readat(const struct region_device *rd, void *b, size_t offset,
			size_t size)
{
	const struct region_device *rdev;
	struct region req = {
		.offset = offset,
		.size = size,
	};

	if (!normalize_and_ok(&rd->region, &req))
		return -1;

	rdev = rdev_root(rd);

	return rdev->ops->readat(rdev, b, req.offset, req.size);
}

ssize_t rdev_writeat(const struct region_device *rd, const void *b,
			size_t offset, size_t size)
{
	const struct region_device *rdev;
	struct region req = {
		.offset = offset,
		.size = size,
	};

	if (!normalize_and_ok(&rd->region, &req))
		return -1;

	rdev = rdev_root(rd);

	if (rdev->ops->writeat == NULL)
		return -1;

	return rdev->ops->writeat(rdev, b, req.offset, req.size);
}

ssize_t rdev_eraseat(const struct region_device *rd, size_t offset,
			size_t size)
{
	const struct region_device *rdev;
	struct region req = {
		.offset = offset,
		.size = size,
	};

	if (!normalize_and_ok(&rd->region, &req))
		return -1;

	rdev = rdev_root(rd);

	/* If the eraseat ptr is NULL we assume that the erase
	 * function was completed successfully. */
	if (rdev->ops->eraseat == NULL)
		return size;

	return rdev->ops->eraseat(rdev, req.offset, req.size);
}

int rdev_chain(struct region_device *child, const struct region_device *parent,
		size_t offset, size_t size)
{
	struct region req = {
		.offset = offset,
		.size = size,
	};

	if (!normalize_and_ok(&parent->region, &req))
		return -1;

	/* Keep track of root region device. Note the offsets are relative
	 * to the root device. */
	child->root = rdev_root(parent);
	child->ops = NULL;
	child->region.offset = req.offset;
	child->region.size = req.size;

	return 0;
}

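/*
 * Usage sketch (hypothetical parent device): narrow a parent down to a
 * 4 KiB sub-window starting at offset 0x2000; accesses on `child` are
 * then relative to that window:
 *
 *	struct region_device child;
 *	if (rdev_chain(&child, &parent_rdev, 0x2000, 0x1000) == 0)
 *		rdev_readat(&child, buf, 0, 0x1000);
 */
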
static void mem_region_device_init(struct mem_region_device *mdev,
		const struct region_device_ops *ops, void *base, size_t size)
{
	memset(mdev, 0, sizeof(*mdev));
	mdev->base = base;
	mdev->rdev.ops = ops;
	mdev->rdev.region.size = size;
}

void mem_region_device_ro_init(struct mem_region_device *mdev, void *base,
				size_t size)
{
	return mem_region_device_init(mdev, &mem_rdev_ro_ops, base, size);
}

void mem_region_device_rw_init(struct mem_region_device *mdev, void *base,
				size_t size)
{
	return mem_region_device_init(mdev, &mem_rdev_rw_ops, base, size);
}

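/*
 * Usage sketch (hypothetical buffer): expose a RAM buffer as a
 * read/write region device:
 *
 *	static char scratch[4096];
 *	struct mem_region_device mdev;
 *	mem_region_device_rw_init(&mdev, scratch, sizeof(scratch));
 *	rdev_writeat(&mdev.rdev, data, 0, data_size);
 */
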
void region_device_init(struct region_device *rdev,
			const struct region_device_ops *ops, size_t offset,
			size_t size)
{
	memset(rdev, 0, sizeof(*rdev));
	rdev->root = NULL;
	rdev->ops = ops;
	rdev->region.offset = offset;
	rdev->region.size = size;
}

static void xlate_region_device_init(struct xlate_region_device *xdev,
			const struct region_device_ops *ops,
			size_t window_count, const struct xlate_window *window_arr,
			size_t parent_size)
{
	memset(xdev, 0, sizeof(*xdev));
	xdev->window_count = window_count;
	xdev->window_arr = window_arr;
	region_device_init(&xdev->rdev, ops, 0, parent_size);
}

void xlate_region_device_ro_init(struct xlate_region_device *xdev,
			size_t window_count, const struct xlate_window *window_arr,
			size_t parent_size)
{
	xlate_region_device_init(xdev, &xlate_rdev_ro_ops, window_count, window_arr,
			parent_size);
}

void xlate_region_device_rw_init(struct xlate_region_device *xdev,
			size_t window_count, const struct xlate_window *window_arr,
			size_t parent_size)
{
	xlate_region_device_init(xdev, &xlate_rdev_rw_ops, window_count, window_arr,
			parent_size);
}

void xlate_window_init(struct xlate_window *window, const struct region_device *access_dev,
			size_t sub_region_offset, size_t sub_region_size)
{
	window->access_dev = access_dev;
	window->sub_region.offset = sub_region_offset;
	window->sub_region.size = sub_region_size;
}

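/*
 * Usage sketch (hypothetical flash device): present a 1 MiB flash
 * device as a window at offset 0xE00000 of a 16 MiB address space:
 *
 *	struct xlate_window window;
 *	struct xlate_region_device xldev;
 *	xlate_window_init(&window, &flash_rdev, 0xE00000, 0x100000);
 *	xlate_region_device_ro_init(&xldev, 1, &window, 0x1000000);
 */
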
static void *mdev_mmap(const struct region_device *rd, size_t offset,
			size_t size __unused)
{
	const struct mem_region_device *mdev;

	mdev = container_of(rd, __typeof__(*mdev), rdev);

	return &mdev->base[offset];
}

static int mdev_munmap(const struct region_device *rd __unused,
			void *mapping __unused)
{
	return 0;
}

static ssize_t mdev_readat(const struct region_device *rd, void *b,
				size_t offset, size_t size)
{
	const struct mem_region_device *mdev;

	mdev = container_of(rd, __typeof__(*mdev), rdev);

	memcpy(b, &mdev->base[offset], size);

	return size;
}

static ssize_t mdev_writeat(const struct region_device *rd, const void *b,
				size_t offset, size_t size)
{
	const struct mem_region_device *mdev;

	mdev = container_of(rd, __typeof__(*mdev), rdev);

	memcpy(&mdev->base[offset], b, size);

	return size;
}

static ssize_t mdev_eraseat(const struct region_device *rd, size_t offset,
				size_t size)
{
	const struct mem_region_device *mdev;

	mdev = container_of(rd, __typeof__(*mdev), rdev);

	/* "Erase" for a memory-backed device is modeled as zero-fill. */
	memset(&mdev->base[offset], 0, size);

	return size;
}

const struct region_device_ops mem_rdev_ro_ops = {
	.mmap = mdev_mmap,
	.munmap = mdev_munmap,
	.readat = mdev_readat,
};

const struct region_device_ops mem_rdev_rw_ops = {
	.mmap = mdev_mmap,
	.munmap = mdev_munmap,
	.readat = mdev_readat,
	.writeat = mdev_writeat,
	.eraseat = mdev_eraseat,
};

static const struct mem_region_device mem_rdev = MEM_REGION_DEV_RO_INIT(0, ~(size_t)0);
static const struct mem_region_device mem_rdev_rw = MEM_REGION_DEV_RW_INIT(0, ~(size_t)0);

int rdev_chain_mem(struct region_device *child, const void *base, size_t size)
{
	return rdev_chain(child, &mem_rdev.rdev, (uintptr_t)base, size);
}

int rdev_chain_mem_rw(struct region_device *child, void *base, size_t size)
{
	return rdev_chain(child, &mem_rdev_rw.rdev, (uintptr_t)base, size);
}

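/*
 * Usage sketch (hypothetical buffer): wrap an existing memory range in
 * a region_device without declaring a separate mem_region_device:
 *
 *	struct region_device rdev;
 *	if (rdev_chain_mem(&rdev, buffer, buffer_size) == 0)
 *		rdev_readat(&rdev, out, 0, buffer_size);
 */
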
void *mmap_helper_rdev_mmap(const struct region_device *rd, size_t offset,
				size_t size)
{
	struct mmap_helper_region_device *mdev;
	void *mapping;

	mdev = container_of((void *)rd, __typeof__(*mdev), rdev);

	mapping = mem_pool_alloc(mdev->pool, size);

	if (mapping == NULL)
		return NULL;

	if (rd->ops->readat(rd, mapping, offset, size) != size) {
		mem_pool_free(mdev->pool, mapping);
		return NULL;
	}

	return mapping;
}

int mmap_helper_rdev_munmap(const struct region_device *rd, void *mapping)
{
	struct mmap_helper_region_device *mdev;

	mdev = container_of((void *)rd, __typeof__(*mdev), rdev);

	mem_pool_free(mdev->pool, mapping);

	return 0;
}

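/*
 * Design note: mmap_helper_rdev_mmap services mappings by allocating
 * from the device's mem_pool and reading the requested range into the
 * allocation, so every successful mmap must be paired with a munmap to
 * return the allocation to the pool.
 */
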
static const struct xlate_window *xlate_find_window(const struct xlate_region_device *xldev,
						    const struct region *req)
{
	size_t i;
	const struct xlate_window *xlwindow;

	for (i = 0; i < xldev->window_count; i++) {
		xlwindow = &xldev->window_arr[i];
		if (region_is_subregion(&xlwindow->sub_region, req))
			return xlwindow;
	}

	return NULL;
}

static void *xlate_mmap(const struct region_device *rd, size_t offset,
			size_t size)
{
	const struct xlate_region_device *xldev;
	struct region req = {
		.offset = offset,
		.size = size,
	};
	const struct xlate_window *xlwindow;

	xldev = container_of(rd, __typeof__(*xldev), rdev);

	xlwindow = xlate_find_window(xldev, &req);
	if (!xlwindow)
		return NULL;

	/* Translate from the xlate address space into the window's. */
	offset -= region_offset(&xlwindow->sub_region);

	return rdev_mmap(xlwindow->access_dev, offset, size);
}

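/*
 * Translation example (hypothetical numbers): with a window whose
 * sub_region starts at 0xE00000, a request at xlate offset 0xE00010
 * becomes offset 0x10 into that window's access_dev.
 */
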
static int xlate_munmap(const struct region_device *rd __unused, void *mapping __unused)
{
	/*
	 * xlate_region_device does not keep track of the access device that was used to service
	 * a mmap request. So, munmap does not do anything. If munmap functionality is required,
	 * then xlate_region_device will have to be updated to accept some pre-allocated space
	 * from caller to keep track of the mapping requests. Since xlate_region_device is only
	 * used for memory mapped boot media on the backend right now, skipping munmap is fine.
	 */
	return 0;
}

static ssize_t xlate_readat(const struct region_device *rd, void *b,
				size_t offset, size_t size)
{
	struct region req = {
		.offset = offset,
		.size = size,
	};
	const struct xlate_window *xlwindow;
	const struct xlate_region_device *xldev;

	xldev = container_of(rd, __typeof__(*xldev), rdev);

	xlwindow = xlate_find_window(xldev, &req);
	if (!xlwindow)
		return -1;

	offset -= region_offset(&xlwindow->sub_region);

	return rdev_readat(xlwindow->access_dev, b, offset, size);
}

static ssize_t xlate_writeat(const struct region_device *rd, const void *b,
				size_t offset, size_t size)
{
	struct region req = {
		.offset = offset,
		.size = size,
	};
	const struct xlate_window *xlwindow;
	const struct xlate_region_device *xldev;

	xldev = container_of(rd, __typeof__(*xldev), rdev);

	xlwindow = xlate_find_window(xldev, &req);
	if (!xlwindow)
		return -1;

	offset -= region_offset(&xlwindow->sub_region);

	return rdev_writeat(xlwindow->access_dev, b, offset, size);
}

static ssize_t xlate_eraseat(const struct region_device *rd,
				size_t offset, size_t size)
{
	struct region req = {
		.offset = offset,
		.size = size,
	};
	const struct xlate_window *xlwindow;
	const struct xlate_region_device *xldev;

	xldev = container_of(rd, __typeof__(*xldev), rdev);

	xlwindow = xlate_find_window(xldev, &req);
	if (!xlwindow)
		return -1;

	offset -= region_offset(&xlwindow->sub_region);

	return rdev_eraseat(xlwindow->access_dev, offset, size);
}

const struct region_device_ops xlate_rdev_ro_ops = {
	.mmap = xlate_mmap,
	.munmap = xlate_munmap,
	.readat = xlate_readat,
};

const struct region_device_ops xlate_rdev_rw_ops = {
	.mmap = xlate_mmap,
	.munmap = xlate_munmap,
	.readat = xlate_readat,
	.writeat = xlate_writeat,
	.eraseat = xlate_eraseat,
};

static void *incoherent_mmap(const struct region_device *rd, size_t offset,
				size_t size)
{
	const struct incoherent_rdev *irdev;

	irdev = container_of(rd, const struct incoherent_rdev, rdev);

	return rdev_mmap(irdev->read, offset, size);
}

static int incoherent_munmap(const struct region_device *rd, void *mapping)
{
	const struct incoherent_rdev *irdev;

	irdev = container_of(rd, const struct incoherent_rdev, rdev);

	return rdev_munmap(irdev->read, mapping);
}

static ssize_t incoherent_readat(const struct region_device *rd, void *b,
					size_t offset, size_t size)
{
	const struct incoherent_rdev *irdev;

	irdev = container_of(rd, const struct incoherent_rdev, rdev);

	return rdev_readat(irdev->read, b, offset, size);
}

static ssize_t incoherent_writeat(const struct region_device *rd, const void *b,
					size_t offset, size_t size)
{
	const struct incoherent_rdev *irdev;

	irdev = container_of(rd, const struct incoherent_rdev, rdev);

	return rdev_writeat(irdev->write, b, offset, size);
}

static ssize_t incoherent_eraseat(const struct region_device *rd, size_t offset,
					size_t size)
{
	const struct incoherent_rdev *irdev;

	irdev = container_of(rd, const struct incoherent_rdev, rdev);

	return rdev_eraseat(irdev->write, offset, size);
}

static const struct region_device_ops incoherent_rdev_ops = {
	.mmap = incoherent_mmap,
	.munmap = incoherent_munmap,
	.readat = incoherent_readat,
	.writeat = incoherent_writeat,
	.eraseat = incoherent_eraseat,
};

const struct region_device *incoherent_rdev_init(struct incoherent_rdev *irdev,
							const struct region *r,
							const struct region_device *read,
							const struct region_device *write)
{
	const size_t size = region_sz(r);

	if (size != region_device_sz(read) || size != region_device_sz(write))
		return NULL;

	/* The region is represented as offset 0 to size. That way, the generic
	 * rdev operations can be called on the read or write implementation
	 * without any unnecessary translation because the offsets all start
	 * at 0. */
	region_device_init(&irdev->rdev, &incoherent_rdev_ops, 0, size);
	irdev->read = read;
	irdev->write = write;

	return &irdev->rdev;
}
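
/*
 * Usage sketch (hypothetical devices): route reads through a
 * memory-mapped view of flash while writes and erases go through a
 * programming interface of the same size:
 *
 *	struct incoherent_rdev irdev_storage;
 *	const struct region_device *rdev =
 *		incoherent_rdev_init(&irdev_storage, &flash_region,
 *					&mmap_flash_rdev, &spi_flash_rdev);
 *	if (rdev != NULL)
 *		rdev_writeat(rdev, data, 0, data_size);
 */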