/*
 * Copyright (C) 2005, 2006
 * Avishay Traeger (avishay@gmail.com)
 * Copyright (C) 2008, 2009
 * Boaz Harrosh <bharrosh@panasas.com>
 *
 * This file is part of exofs.
 *
 * exofs is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.  Since it is based on ext2, and the only
 * valid version of GPL for the Linux kernel is version 2, the only valid
 * version of GPL for exofs is version 2.
 *
 * exofs is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with exofs; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include <linux/slab.h>
#include <scsi/scsi_device.h>
#include <asm/div64.h>

#include "exofs.h"

#define EXOFS_DBGMSG2(M...) do {} while (0)
/* #define EXOFS_DBGMSG2 EXOFS_DBGMSG */
void exofs_make_credential(u8 cred_a[OSD_CAP_LEN], const struct osd_obj_id *obj)
{
	osd_sec_init_nosec_doall_caps(cred_a, obj, false, true);
}
int exofs_read_kern(struct osd_dev *od, u8 *cred, struct osd_obj_id *obj,
		    u64 offset, void *p, unsigned length)
{
	struct osd_request *or = osd_start_request(od, GFP_KERNEL);
/*	struct osd_sense_info osi = {.key = 0};*/
	int ret;

	if (unlikely(!or)) {
		EXOFS_DBGMSG("%s: osd_start_request failed.\n", __func__);
		return -ENOMEM;
	}
	ret = osd_req_read_kern(or, obj, offset, p, length);
	if (unlikely(ret)) {
		EXOFS_DBGMSG("%s: osd_req_read_kern failed.\n", __func__);
		goto out;
	}

	ret = osd_finalize_request(or, 0, cred, NULL);
	if (unlikely(ret)) {
		EXOFS_DBGMSG("Failed to osd_finalize_request() => %d\n", ret);
		goto out;
	}

	ret = osd_execute_request(or);
	if (unlikely(ret))
		EXOFS_DBGMSG("osd_execute_request() => %d\n", ret);
	/* osd_req_decode_sense(or, ret); */

out:
	osd_end_request(or);
	return ret;
}
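/*
 * A minimal usage sketch of the helper above (names and values here are
 * hypothetical, not part of this file): synchronously reading the first
 * 512 bytes of an object into a kernel buffer, much as a mount path would
 * for an on-disk superblock object.
 *
 *	struct osd_obj_id obj = {
 *		.partition = some_partition_id,
 *		.id = some_object_id,
 *	};
 *	u8 cred[OSD_CAP_LEN];
 *	char buf[512];
 *	int ret;
 *
 *	exofs_make_credential(cred, &obj);
 *	ret = exofs_read_kern(od, cred, &obj, 0, buf, sizeof(buf));
 *	if (ret)
 *		return ret;
 */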
int exofs_get_io_state(struct exofs_layout *layout,
		       struct exofs_io_state **pios)
{
	struct exofs_io_state *ios;

	/*TODO: Maybe use a kmem_cache per sbi of size
	 * exofs_io_state_size(layout->s_numdevs)
	 */
	ios = kzalloc(exofs_io_state_size(layout->s_numdevs), GFP_KERNEL);
	if (unlikely(!ios)) {
		EXOFS_DBGMSG("Failed kzalloc bytes=%d\n",
			     exofs_io_state_size(layout->s_numdevs));
		*pios = NULL;
		return -ENOMEM;
	}

	ios->layout = layout;
	ios->obj.partition = layout->s_pid;
	*pios = ios;
	return 0;
}
void exofs_put_io_state(struct exofs_io_state *ios)
{
	if (ios) {
		unsigned i;

		for (i = 0; i < ios->numdevs; i++) {
			struct exofs_per_dev_state *per_dev = &ios->per_dev[i];

			if (per_dev->or)
				osd_end_request(per_dev->or);
			if (per_dev->bio)
				bio_put(per_dev->bio);
		}

		kfree(ios);
	}
}
unsigned exofs_layout_od_id(struct exofs_layout *layout,
			    osd_id obj_no, unsigned layout_index)
{
/*	switch (layout->lay_func) {
	case LAYOUT_MOVING_WINDOW:
	{*/
		unsigned dev_mod = obj_no;

		return (layout_index + dev_mod * layout->mirrors_p1) %
							layout->s_numdevs;
/*	}
	case LAYOUT_FUNC_IMPLICT:
		return layout->devs[layout_index];
	}*/
}

static inline struct osd_dev *exofs_ios_od(struct exofs_io_state *ios,
					   unsigned layout_index)
{
	return ios->layout->s_ods[
		exofs_layout_od_id(ios->layout, ios->obj.id, layout_index)];
}
static void _sync_done(struct exofs_io_state *ios, void *p)
{
	struct completion *waiting = p;

	complete(waiting);
}

static void _last_io(struct kref *kref)
{
	struct exofs_io_state *ios = container_of(
					kref, struct exofs_io_state, kref);

	ios->done(ios, ios->private);
}

static void _done_io(struct osd_request *or, void *p)
{
	struct exofs_io_state *ios = p;

	kref_put(&ios->kref, _last_io);
}
static int exofs_io_execute(struct exofs_io_state *ios)
{
	DECLARE_COMPLETION_ONSTACK(wait);
	bool sync = (ios->done == NULL);
	int i, ret;

	if (sync) {
		ios->done = _sync_done;
		ios->private = &wait;
	}

	for (i = 0; i < ios->numdevs; i++) {
		struct osd_request *or = ios->per_dev[i].or;
		if (unlikely(!or))
			continue;

		ret = osd_finalize_request(or, 0, ios->cred, NULL);
		if (unlikely(ret)) {
			EXOFS_DBGMSG("Failed to osd_finalize_request() => %d\n",
				     ret);
			return ret;
		}
	}

	kref_init(&ios->kref);

	for (i = 0; i < ios->numdevs; i++) {
		struct osd_request *or = ios->per_dev[i].or;
		if (unlikely(!or))
			continue;

		kref_get(&ios->kref);
		osd_execute_request_async(or, _done_io, ios);
	}

	kref_put(&ios->kref, _last_io);
	ret = 0;

	if (sync) {
		wait_for_completion(&wait);
		ret = exofs_check_io(ios, NULL);
	}
	return ret;
}
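/*
 * Note on the completion scheme above: kref_init() takes one reference on
 * behalf of the submitter, and each in-flight request takes one more via
 * kref_get().  Every _done_io() drops one, and the submitter drops its own
 * right after the submit loop, so ios->done() fires exactly once, only
 * after the last request has completed.  With numdevs == 2, for example,
 * the count goes 1 -> 2 -> 3 during submission, then 3 -> 2 -> 1 -> 0 as
 * the two completions and the submitter's put arrive, in whatever order.
 */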
static void _clear_bio(struct bio *bio)
{
	struct bio_vec *bv;
	unsigned i;

	__bio_for_each_segment(bv, bio, i, 0) {
		unsigned this_count = bv->bv_len;

		if (likely(PAGE_SIZE == this_count))
			clear_highpage(bv->bv_page);
		else
			zero_user(bv->bv_page, bv->bv_offset, this_count);
	}
}
int exofs_check_io(struct exofs_io_state *ios, u64 *resid)
{
	enum osd_err_priority accumulated_osd_err = 0;
	int accumulated_lin_err = 0;
	int i;

	for (i = 0; i < ios->numdevs; i++) {
		struct osd_sense_info osi;
		struct osd_request *or = ios->per_dev[i].or;
		int ret;

		if (unlikely(!or))
			continue;

		ret = osd_req_decode_sense(or, &osi);
		if (likely(!ret))
			continue;

		if (OSD_ERR_PRI_CLEAR_PAGES == osi.osd_err_pri) {
			/* start read offset passed end of file */
			_clear_bio(ios->per_dev[i].bio);
			EXOFS_DBGMSG("start read offset passed end of file "
				"offset=0x%llx, length=0x%llx\n",
				_LLU(ios->per_dev[i].offset),
				_LLU(ios->per_dev[i].length));

			continue; /* we recovered */
		}

		if (osi.osd_err_pri >= accumulated_osd_err) {
			accumulated_osd_err = osi.osd_err_pri;
			accumulated_lin_err = ret;
		}
	}

	/* TODO: raid specific residual calculations */
	if (resid) {
		if (likely(!accumulated_lin_err))
			*resid = 0;
		else
			*resid = ios->length;
	}

	return accumulated_lin_err;
}
/*
 * L - logical offset into the file
 *
 * U - The number of bytes in a stripe within a group
 *
 *	U = stripe_unit * group_width
 *
 * T - The number of bytes striped within a group of component objects
 *     (before advancing to the next group)
 *
 *	T = stripe_unit * group_width * group_depth
 *
 * S - The number of bytes striped across all component objects
 *     before the pattern repeats
 *
 *	S = stripe_unit * group_width * group_depth * group_count
 *
 * M - The "major" (i.e., across all components) stripe number
 *
 *	M = L / S
 *
 * G - Counts the groups from the beginning of the major stripe
 *
 *	G = (L - (M * S)) / T   [or (L % S) / T]
 *
 * H - The byte offset within the group
 *
 *	H = (L - (M * S)) % T   [or (L % S) % T]
 *
 * N - The "minor" (i.e., across the group) stripe number
 *
 *	N = H / U
 *
 * C - The component index corresponding to L
 *
 *	C = (H - (N * U)) / stripe_unit + G * group_width
 *	[or (L % U) / stripe_unit + G * group_width]
 *
 * O - The component offset corresponding to L
 *
 *	O = L % stripe_unit + N * stripe_unit + M * group_depth * stripe_unit
 */
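/*
 * A worked example of the mapping above (parameter values chosen purely
 * for illustration): let stripe_unit = 64K, group_width = 4,
 * group_depth = 2 and group_count = 2.  Then U = 256K, T = 512K and
 * S = 1M.  For the logical offset L = 720K:
 *
 *	M = 720K / 1M = 0
 *	G = (720K % 1M) / 512K = 1
 *	H = (720K % 1M) % 512K = 208K
 *	N = 208K / 256K = 0
 *	C = (208K - 0 * 256K) / 64K + 1 * 4 = 3 + 4 = 7
 *	O = 720K % 64K + 0 + 0 = 16K
 *
 * i.e. byte 720K of the file lives at offset 16K within component
 * object 7, the fourth device of the second group.
 */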
struct _striping_info {
	u64 obj_offset;
	u64 group_length;
	unsigned dev;
	unsigned unit_off;
};

static void _calc_stripe_info(struct exofs_io_state *ios, u64 file_offset,
			      struct _striping_info *si)
{
	u32 stripe_unit = ios->layout->stripe_unit;
	u32 group_width = ios->layout->group_width;
	u64 group_depth = ios->layout->group_depth;

	u32 U = stripe_unit * group_width;
	u64 T = U * group_depth;
	u64 S = T * ios->layout->group_count;
	u64 M = div64_u64(file_offset, S);

	/*
	G = (L - (M * S)) / T
	H = (L - (M * S)) % T
	*/
	u64 LmodS = file_offset - M * S;
	u32 G = div64_u64(LmodS, T);
	u64 H = LmodS - G * T;

	u32 N = div_u64(H, U);

	/* "H - (N * U)" is just "H % U" so it's bound to u32 */
	si->dev = (u32)(H - (N * U)) / stripe_unit + G * group_width;
	si->dev *= ios->layout->mirrors_p1;

	div_u64_rem(file_offset, stripe_unit, &si->unit_off);

	si->obj_offset = si->unit_off + (N * stripe_unit) +
				  (M * group_depth * stripe_unit);

	si->group_length = T - H;
}
static int _add_stripe_unit(struct exofs_io_state *ios, unsigned *cur_pg,
		unsigned pgbase, struct exofs_per_dev_state *per_dev,
		int cur_len)
{
	unsigned pg = *cur_pg;
	struct request_queue *q =
			osd_request_queue(exofs_ios_od(ios, per_dev->dev));

	per_dev->length += cur_len;

	if (per_dev->bio == NULL) {
		unsigned pages_in_stripe = ios->layout->group_width *
					(ios->layout->stripe_unit / PAGE_SIZE);
		unsigned bio_size = (ios->nr_pages + pages_in_stripe) /
						ios->layout->group_width;

		per_dev->bio = bio_kmalloc(GFP_KERNEL, bio_size);
		if (unlikely(!per_dev->bio)) {
			EXOFS_DBGMSG("Failed to allocate BIO size=%u\n",
				     bio_size);
			return -ENOMEM;
		}
	}

	while (cur_len > 0) {
		unsigned pglen = min_t(unsigned, PAGE_SIZE - pgbase, cur_len);
		unsigned added_len;

		BUG_ON(ios->nr_pages <= pg);
		cur_len -= pglen;

		added_len = bio_add_pc_page(q, per_dev->bio, ios->pages[pg],
					    pglen, pgbase);
		if (unlikely(pglen != added_len))
			return -ENOMEM;
		pgbase = 0;
		++pg;
	}
	BUG_ON(cur_len);

	*cur_pg = pg;
	return 0;
}
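/*
 * Note on the bio_size estimate above: (nr_pages + pages_in_stripe) /
 * group_width is the even share per component plus a full stripe of slack,
 * to cover an uneven split.  For example (illustrative values only), with
 * 4K pages, stripe_unit = 64K and group_width = 4, pages_in_stripe is
 * 4 * 16 = 64; an I/O of nr_pages = 100 gets bios of (100 + 64) / 4 = 41
 * vecs per device, while the most loaded device actually receives 32
 * pages, so the allocation never comes up short.
 */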
static int _prepare_one_group(struct exofs_io_state *ios, u64 length,
			      struct _striping_info *si)
{
	unsigned stripe_unit = ios->layout->stripe_unit;
	unsigned mirrors_p1 = ios->layout->mirrors_p1;
	unsigned devs_in_group = ios->layout->group_width * mirrors_p1;
	unsigned dev = si->dev;
	unsigned first_dev = dev - (dev % devs_in_group);
	unsigned max_comp = ios->numdevs ? ios->numdevs - mirrors_p1 : 0;
	unsigned cur_pg = ios->pages_consumed;
	int ret = 0;

	while (length) {
		struct exofs_per_dev_state *per_dev = &ios->per_dev[dev];
		unsigned cur_len, page_off = 0;

		if (!per_dev->length) {
			per_dev->dev = dev;
			if (dev < si->dev) {
				per_dev->offset = si->obj_offset + stripe_unit -
								   si->unit_off;
				cur_len = stripe_unit;
			} else if (dev == si->dev) {
				per_dev->offset = si->obj_offset;
				cur_len = stripe_unit - si->unit_off;
				page_off = si->unit_off & ~PAGE_MASK;
				BUG_ON(page_off && (page_off != ios->pgbase));
			} else { /* dev > si->dev */
				per_dev->offset = si->obj_offset - si->unit_off;
				cur_len = stripe_unit;
			}

			if (max_comp < dev)
				max_comp = dev;
		} else {
			cur_len = stripe_unit;
		}
		if (cur_len >= length)
			cur_len = length;

		ret = _add_stripe_unit(ios, &cur_pg, page_off, per_dev,
				       cur_len);
		if (unlikely(ret))
			goto out;

		dev += mirrors_p1;
		dev = (dev % devs_in_group) + first_dev;

		length -= cur_len;
	}
out:
	ios->numdevs = max_comp + mirrors_p1;
	ios->pages_consumed = cur_pg;
	return ret;
}
static int _prepare_for_striping(struct exofs_io_state *ios)
{
	u64 length = ios->length;
	u64 offset = ios->offset;
	struct _striping_info si;
	int ret = 0;

	if (!ios->pages) {
		if (ios->kern_buff) {
			struct exofs_per_dev_state *per_dev = &ios->per_dev[0];

			_calc_stripe_info(ios, ios->offset, &si);
			per_dev->offset = si.obj_offset;
			per_dev->dev = si.dev;

			/* no cross device without page array */
			BUG_ON((ios->layout->group_width > 1) &&
			       (si.unit_off + ios->length >
				ios->layout->stripe_unit));
		}
		ios->numdevs = ios->layout->mirrors_p1;
		return 0;
	}

	while (length) {
		_calc_stripe_info(ios, offset, &si);

		if (length < si.group_length)
			si.group_length = length;

		ret = _prepare_one_group(ios, si.group_length, &si);
		if (unlikely(ret))
			goto out;

		offset += si.group_length;
		length -= si.group_length;
	}

out:
	return ret;
}
int exofs_sbi_create(struct exofs_io_state *ios)
{
	int i, ret;

	for (i = 0; i < ios->layout->s_numdevs; i++) {
		struct osd_request *or;

		or = osd_start_request(exofs_ios_od(ios, i), GFP_KERNEL);
		if (unlikely(!or)) {
			EXOFS_ERR("%s: osd_start_request failed\n", __func__);
			ret = -ENOMEM;
			goto out;
		}
		ios->per_dev[i].or = or;
		ios->numdevs++;

		osd_req_create_object(or, &ios->obj);
	}
	ret = exofs_io_execute(ios);

out:
	return ret;
}
int exofs_sbi_remove(struct exofs_io_state *ios)
{
	int i, ret;

	for (i = 0; i < ios->layout->s_numdevs; i++) {
		struct osd_request *or;

		or = osd_start_request(exofs_ios_od(ios, i), GFP_KERNEL);
		if (unlikely(!or)) {
			EXOFS_ERR("%s: osd_start_request failed\n", __func__);
			ret = -ENOMEM;
			goto out;
		}
		ios->per_dev[i].or = or;
		ios->numdevs++;

		osd_req_remove_object(or, &ios->obj);
	}
	ret = exofs_io_execute(ios);

out:
	return ret;
}
static int _sbi_write_mirror(struct exofs_io_state *ios, int cur_comp)
{
	struct exofs_per_dev_state *master_dev = &ios->per_dev[cur_comp];
	unsigned dev = ios->per_dev[cur_comp].dev;
	unsigned last_comp = cur_comp + ios->layout->mirrors_p1;
	int ret = 0;

	if (ios->pages && !master_dev->length)
		return 0; /* Just an empty slot */

	for (; cur_comp < last_comp; ++cur_comp, ++dev) {
		struct exofs_per_dev_state *per_dev = &ios->per_dev[cur_comp];
		struct osd_request *or;

		or = osd_start_request(exofs_ios_od(ios, dev), GFP_KERNEL);
		if (unlikely(!or)) {
			EXOFS_ERR("%s: osd_start_request failed\n", __func__);
			ret = -ENOMEM;
			goto out;
		}
		per_dev->or = or;
		per_dev->offset = master_dev->offset;

		if (ios->pages) {
			struct bio *bio;

			if (per_dev != master_dev) {
				bio = bio_kmalloc(GFP_KERNEL,
						  master_dev->bio->bi_max_vecs);
				if (unlikely(!bio)) {
					EXOFS_DBGMSG(
					     "Failed to allocate BIO size=%u\n",
					     master_dev->bio->bi_max_vecs);
					ret = -ENOMEM;
					goto out;
				}

				__bio_clone(bio, master_dev->bio);
				bio->bi_bdev = NULL;
				bio->bi_next = NULL;
				per_dev->length = master_dev->length;
				per_dev->bio = bio;
				per_dev->dev = dev;
			} else {
				bio = master_dev->bio;
				bio->bi_rw |= REQ_WRITE;
			}

			osd_req_write(or, &ios->obj, per_dev->offset, bio,
				      per_dev->length);
			EXOFS_DBGMSG("write(0x%llx) offset=0x%llx "
				     "length=0x%llx dev=%d\n",
				     _LLU(ios->obj.id), _LLU(per_dev->offset),
				     _LLU(per_dev->length), dev);
		} else if (ios->kern_buff) {
			ret = osd_req_write_kern(or, &ios->obj, per_dev->offset,
						 ios->kern_buff, ios->length);
			if (unlikely(ret))
				goto out;
			EXOFS_DBGMSG2("write_kern(0x%llx) offset=0x%llx "
				      "length=0x%llx dev=%d\n",
				      _LLU(ios->obj.id), _LLU(per_dev->offset),
				      _LLU(ios->length), dev);
		} else {
			osd_req_set_attributes(or, &ios->obj);
			EXOFS_DBGMSG2("obj(0x%llx) set_attributes=%d dev=%d\n",
				      _LLU(ios->obj.id), ios->out_attr_len, dev);
		}

		if (ios->out_attr)
			osd_req_add_set_attr_list(or, ios->out_attr,
						  ios->out_attr_len);

		if (ios->in_attr)
			osd_req_add_get_attr_list(or, ios->in_attr,
						  ios->in_attr_len);
	}

out:
	return ret;
}
int exofs_sbi_write(struct exofs_io_state *ios)
{
	int i;
	int ret;

	ret = _prepare_for_striping(ios);
	if (unlikely(ret))
		return ret;

	for (i = 0; i < ios->numdevs; i += ios->layout->mirrors_p1) {
		ret = _sbi_write_mirror(ios, i);
		if (unlikely(ret))
			return ret;
	}

	ret = exofs_io_execute(ios);
	return ret;
}
static int _sbi_read_mirror(struct exofs_io_state *ios, unsigned cur_comp)
{
	struct osd_request *or;
	struct exofs_per_dev_state *per_dev = &ios->per_dev[cur_comp];
	unsigned first_dev = (unsigned)ios->obj.id;

	if (ios->pages && !per_dev->length)
		return 0; /* Just an empty slot */

	first_dev = per_dev->dev + first_dev % ios->layout->mirrors_p1;
	or = osd_start_request(exofs_ios_od(ios, first_dev), GFP_KERNEL);
	if (unlikely(!or)) {
		EXOFS_ERR("%s: osd_start_request failed\n", __func__);
		return -ENOMEM;
	}
	per_dev->or = or;

	if (ios->pages) {
		osd_req_read(or, &ios->obj, per_dev->offset,
			     per_dev->bio, per_dev->length);
		EXOFS_DBGMSG("read(0x%llx) offset=0x%llx length=0x%llx"
			     " dev=%d\n", _LLU(ios->obj.id),
			     _LLU(per_dev->offset), _LLU(per_dev->length),
			     first_dev);
	} else if (ios->kern_buff) {
		int ret = osd_req_read_kern(or, &ios->obj, per_dev->offset,
					    ios->kern_buff, ios->length);
		EXOFS_DBGMSG2("read_kern(0x%llx) offset=0x%llx "
			      "length=0x%llx dev=%d ret=>%d\n",
			      _LLU(ios->obj.id), _LLU(per_dev->offset),
			      _LLU(ios->length), first_dev, ret);
		if (unlikely(ret))
			return ret;
	} else {
		osd_req_get_attributes(or, &ios->obj);
		EXOFS_DBGMSG2("obj(0x%llx) get_attributes=%d dev=%d\n",
			      _LLU(ios->obj.id), ios->in_attr_len, first_dev);
	}

	if (ios->out_attr)
		osd_req_add_set_attr_list(or, ios->out_attr, ios->out_attr_len);

	if (ios->in_attr)
		osd_req_add_get_attr_list(or, ios->in_attr, ios->in_attr_len);

	return 0;
}
int exofs_sbi_read(struct exofs_io_state *ios)
{
	int i;
	int ret;

	ret = _prepare_for_striping(ios);
	if (unlikely(ret))
		return ret;

	for (i = 0; i < ios->numdevs; i += ios->layout->mirrors_p1) {
		ret = _sbi_read_mirror(ios, i);
		if (unlikely(ret))
			return ret;
	}

	ret = exofs_io_execute(ios);
	return ret;
}
int extract_attr_from_ios(struct exofs_io_state *ios, struct osd_attr *attr)
{
	struct osd_attr cur_attr = {.attr_page = 0}; /* start with zeros */
	void *iter = NULL;
	int nelem;

	do {
		nelem = 1;
		osd_req_decode_get_attr_list(ios->per_dev[0].or,
					     &cur_attr, &nelem, &iter);
		if ((cur_attr.attr_page == attr->attr_page) &&
		    (cur_attr.attr_id == attr->attr_id)) {
			attr->len = cur_attr.len;
			attr->val_ptr = cur_attr.val_ptr;
			return 0;
		}
	} while (iter);

	return -EIO;
}
static int _truncate_mirrors(struct exofs_io_state *ios, unsigned cur_comp,
			     struct osd_attr *attr)
{
	int last_comp = cur_comp + ios->layout->mirrors_p1;

	for (; cur_comp < last_comp; ++cur_comp) {
		struct exofs_per_dev_state *per_dev = &ios->per_dev[cur_comp];
		struct osd_request *or;

		or = osd_start_request(exofs_ios_od(ios, cur_comp), GFP_KERNEL);
		if (unlikely(!or)) {
			EXOFS_ERR("%s: osd_start_request failed\n", __func__);
			return -ENOMEM;
		}
		per_dev->or = or;

		osd_req_set_attributes(or, &ios->obj);
		osd_req_add_set_attr_list(or, attr, 1);
	}

	return 0;
}
int exofs_oi_truncate(struct exofs_i_info *oi, u64 size)
{
	struct exofs_sb_info *sbi = oi->vfs_inode.i_sb->s_fs_info;
	struct exofs_io_state *ios;
	struct exofs_trunc_attr {
		struct osd_attr attr;
		__be64 newsize;
	} *size_attrs;
	struct _striping_info si;
	int i, ret;

	ret = exofs_get_io_state(&sbi->layout, &ios);
	if (unlikely(ret))
		return ret;

	size_attrs = kcalloc(ios->layout->group_width, sizeof(*size_attrs),
			     GFP_KERNEL);
	if (unlikely(!size_attrs)) {
		ret = -ENOMEM;
		goto out;
	}

	ios->obj.id = exofs_oi_objno(oi);
	ios->cred = oi->i_cred;

	ios->numdevs = ios->layout->s_numdevs;
	_calc_stripe_info(ios, size, &si);

	for (i = 0; i < ios->layout->group_width; ++i) {
		struct exofs_trunc_attr *size_attr = &size_attrs[i];
		u64 obj_size;

		if (i < si.dev)
			obj_size = si.obj_offset +
					ios->layout->stripe_unit - si.unit_off;
		else if (i == si.dev)
			obj_size = si.obj_offset;
		else /* i > si.dev */
			obj_size = si.obj_offset - si.unit_off;

		size_attr->newsize = cpu_to_be64(obj_size);
		size_attr->attr = g_attr_logical_length;
		size_attr->attr.val_ptr = &size_attr->newsize;

		ret = _truncate_mirrors(ios, i * ios->layout->mirrors_p1,
					&size_attr->attr);
		if (unlikely(ret))
			goto out;
	}
	ret = exofs_io_execute(ios);

out:
	kfree(size_attrs);
	exofs_put_io_state(ios);
	return ret;
}
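/*
 * A worked example of the per-component sizes computed above (values chosen
 * for illustration; one group, mirrors_p1 == 1): with stripe_unit = 64K and
 * group_width = 4, truncating to size = 200K gives si.dev = 3,
 * si.unit_off = 8K and si.obj_offset = 8K.  Components 0..2 (i < si.dev)
 * are cut to 8K + 64K - 8K = 64K, component 3 to si.obj_offset = 8K, and
 * any i > si.dev would be cut to 8K - 8K = 0; the per-object lengths
 * 64K + 64K + 64K + 8K sum back to the file size of 200K.
 */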