NFSv4.1/flexfile: ff_layout_remove_mirror can be static
/*
 * Module for pnfs flexfile layout driver.
 *
 * Copyright (c) 2014, Primary Data, Inc. All rights reserved.
 *
 * Tao Peng <bergwolf@primarydata.com>
 */

#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/module.h>

#include <linux/sunrpc/metrics.h>

#include "flexfilelayout.h"
#include "../nfs4session.h"
#include "../nfs4idmap.h"
#include "../internal.h"
#include "../delegation.h"
#include "../nfs4trace.h"
#include "../iostat.h"
#include "../nfs.h"
#include "../nfs42.h"

#define NFSDBG_FACILITY		NFSDBG_PNFS_LD

#define FF_LAYOUT_POLL_RETRY_MAX	(15*HZ)

static struct pnfs_layout_hdr *
ff_layout_alloc_layout_hdr(struct inode *inode, gfp_t gfp_flags)
{
	struct nfs4_flexfile_layout *ffl;

	ffl = kzalloc(sizeof(*ffl), gfp_flags);
	if (ffl) {
		INIT_LIST_HEAD(&ffl->error_list);
		INIT_LIST_HEAD(&ffl->mirrors);
		return &ffl->generic_hdr;
	} else
		return NULL;
}

static void
ff_layout_free_layout_hdr(struct pnfs_layout_hdr *lo)
{
	struct nfs4_ff_layout_ds_err *err, *n;

	list_for_each_entry_safe(err, n, &FF_LAYOUT_FROM_HDR(lo)->error_list,
				 list) {
		list_del(&err->list);
		kfree(err);
	}
	kfree(FF_LAYOUT_FROM_HDR(lo));
}

static int decode_stateid(struct xdr_stream *xdr, nfs4_stateid *stateid)
{
	__be32 *p;

	p = xdr_inline_decode(xdr, NFS4_STATEID_SIZE);
	if (unlikely(p == NULL))
		return -ENOBUFS;
	memcpy(stateid, p, NFS4_STATEID_SIZE);
	dprintk("%s: stateid id= [%x%x%x%x]\n", __func__,
		p[0], p[1], p[2], p[3]);
	return 0;
}

static int decode_deviceid(struct xdr_stream *xdr, struct nfs4_deviceid *devid)
{
	__be32 *p;

	p = xdr_inline_decode(xdr, NFS4_DEVICEID4_SIZE);
	if (unlikely(!p))
		return -ENOBUFS;
	memcpy(devid, p, NFS4_DEVICEID4_SIZE);
	nfs4_print_deviceid(devid);
	return 0;
}

static int decode_nfs_fh(struct xdr_stream *xdr, struct nfs_fh *fh)
{
	__be32 *p;

	p = xdr_inline_decode(xdr, 4);
	if (unlikely(!p))
		return -ENOBUFS;
	fh->size = be32_to_cpup(p++);
	if (fh->size > sizeof(struct nfs_fh)) {
		printk(KERN_ERR "NFS flexfiles: Too big fh received %d\n",
		       fh->size);
		return -EOVERFLOW;
	}
	/* fh.data */
	p = xdr_inline_decode(xdr, fh->size);
	if (unlikely(!p))
		return -ENOBUFS;
	memcpy(&fh->data, p, fh->size);
	dprintk("%s: fh len %d\n", __func__, fh->size);

	return 0;
}

/*
 * Currently only stringified uids and gids are accepted.
 * I.e., kerberos is not supported to the DSes, so no principals.
 *
 * That means that one common function will suffice, but when
 * principals are added, this should be split to accommodate
 * calls to both nfs_map_name_to_uid() and nfs_map_group_to_gid().
 */
static int
decode_name(struct xdr_stream *xdr, u32 *id)
{
	__be32 *p;
	int len;

	/* opaque_length(4)*/
	p = xdr_inline_decode(xdr, 4);
	if (unlikely(!p))
		return -ENOBUFS;
	len = be32_to_cpup(p++);
	if (len < 0)
		return -EINVAL;

	dprintk("%s: len %u\n", __func__, len);

	/* opaque body */
	p = xdr_inline_decode(xdr, len);
	if (unlikely(!p))
		return -ENOBUFS;

	if (!nfs_map_string_to_numeric((char *)p, len, id))
		return -EINVAL;

	return 0;
}

static bool ff_mirror_match_fh(const struct nfs4_ff_layout_mirror *m1,
		const struct nfs4_ff_layout_mirror *m2)
{
	int i, j;

	if (m1->fh_versions_cnt != m2->fh_versions_cnt)
		return false;
	for (i = 0; i < m1->fh_versions_cnt; i++) {
		bool found_fh = false;
		for (j = 0; j < m2->fh_versions_cnt; j++) {
			if (nfs_compare_fh(&m1->fh_versions[i],
					   &m2->fh_versions[j]) == 0) {
				found_fh = true;
				break;
			}
		}
		if (!found_fh)
			return false;
	}
	return true;
}

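/*
 * ff_layout_add_mirror() deduplicates mirrors within a layout: if an
 * existing mirror on the same data server exports the same set of file
 * handles, take a reference on it and return it; otherwise link the new
 * mirror into the layout's mirror list.
 */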
static struct nfs4_ff_layout_mirror *
ff_layout_add_mirror(struct pnfs_layout_hdr *lo,
		struct nfs4_ff_layout_mirror *mirror)
{
	struct nfs4_flexfile_layout *ff_layout = FF_LAYOUT_FROM_HDR(lo);
	struct nfs4_ff_layout_mirror *pos;
	struct inode *inode = lo->plh_inode;

	spin_lock(&inode->i_lock);
	list_for_each_entry(pos, &ff_layout->mirrors, mirrors) {
		if (mirror->mirror_ds != pos->mirror_ds)
			continue;
		if (!ff_mirror_match_fh(mirror, pos))
			continue;
		if (atomic_inc_not_zero(&pos->ref)) {
			spin_unlock(&inode->i_lock);
			return pos;
		}
	}
	list_add(&mirror->mirrors, &ff_layout->mirrors);
	mirror->layout = lo;
	spin_unlock(&inode->i_lock);
	return mirror;
}

static void
ff_layout_remove_mirror(struct nfs4_ff_layout_mirror *mirror)
{
	struct inode *inode;
	if (mirror->layout == NULL)
		return;
	inode = mirror->layout->plh_inode;
	spin_lock(&inode->i_lock);
	list_del(&mirror->mirrors);
	spin_unlock(&inode->i_lock);
	mirror->layout = NULL;
}

static struct nfs4_ff_layout_mirror *ff_layout_alloc_mirror(gfp_t gfp_flags)
{
	struct nfs4_ff_layout_mirror *mirror;

	mirror = kzalloc(sizeof(*mirror), gfp_flags);
	if (mirror != NULL) {
		spin_lock_init(&mirror->lock);
		atomic_set(&mirror->ref, 1);
		INIT_LIST_HEAD(&mirror->mirrors);
	}
	return mirror;
}

static void ff_layout_free_mirror(struct nfs4_ff_layout_mirror *mirror)
{
	ff_layout_remove_mirror(mirror);
	kfree(mirror->fh_versions);
	nfs4_ff_layout_put_deviceid(mirror->mirror_ds);
	kfree(mirror);
}

static void ff_layout_put_mirror(struct nfs4_ff_layout_mirror *mirror)
{
	if (mirror != NULL && atomic_dec_and_test(&mirror->ref))
		ff_layout_free_mirror(mirror);
}

static void ff_layout_free_mirror_array(struct nfs4_ff_layout_segment *fls)
{
	int i;

	if (fls->mirror_array) {
		for (i = 0; i < fls->mirror_array_cnt; i++) {
			/* normally mirror_ds is freed in
			 * .free_deviceid_node but we still do it here
			 * for .alloc_lseg error path */
			ff_layout_put_mirror(fls->mirror_array[i]);
		}
		kfree(fls->mirror_array);
		fls->mirror_array = NULL;
	}
}

static int ff_layout_check_layout(struct nfs4_layoutget_res *lgr)
{
	int ret = 0;

	dprintk("--> %s\n", __func__);

	/* FIXME: remove this check when layout segment support is added */
	if (lgr->range.offset != 0 ||
	    lgr->range.length != NFS4_MAX_UINT64) {
		dprintk("%s Only whole file layouts supported. Use MDS i/o\n",
			__func__);
		ret = -EINVAL;
	}

	dprintk("--> %s returns %d\n", __func__, ret);
	return ret;
}

static void _ff_layout_free_lseg(struct nfs4_ff_layout_segment *fls)
{
	if (fls) {
		ff_layout_free_mirror_array(fls);
		kfree(fls);
	}
}

static void ff_layout_sort_mirrors(struct nfs4_ff_layout_segment *fls)
{
	int i, j;

	for (i = 0; i < fls->mirror_array_cnt - 1; i++) {
		for (j = i + 1; j < fls->mirror_array_cnt; j++)
			if (fls->mirror_array[i]->efficiency <
			    fls->mirror_array[j]->efficiency)
				swap(fls->mirror_array[i],
				     fls->mirror_array[j]);
	}
}

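/*
 * ff_layout_alloc_lseg() below decodes the flexfile layout body returned
 * by LAYOUTGET: stripe unit (8 bytes) and mirror count (4 bytes), then for
 * each mirror a data-server count, deviceid, efficiency, stateid, an array
 * of file handles, and the stringified user and group names, followed by
 * an optional trailing flags word.
 */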
static struct pnfs_layout_segment *
ff_layout_alloc_lseg(struct pnfs_layout_hdr *lh,
		     struct nfs4_layoutget_res *lgr,
		     gfp_t gfp_flags)
{
	struct pnfs_layout_segment *ret;
	struct nfs4_ff_layout_segment *fls = NULL;
	struct xdr_stream stream;
	struct xdr_buf buf;
	struct page *scratch;
	u64 stripe_unit;
	u32 mirror_array_cnt;
	__be32 *p;
	int i, rc;

	dprintk("--> %s\n", __func__);
	scratch = alloc_page(gfp_flags);
	if (!scratch)
		return ERR_PTR(-ENOMEM);

	xdr_init_decode_pages(&stream, &buf, lgr->layoutp->pages,
			      lgr->layoutp->len);
	xdr_set_scratch_buffer(&stream, page_address(scratch), PAGE_SIZE);

	/* stripe unit and mirror_array_cnt */
	rc = -EIO;
	p = xdr_inline_decode(&stream, 8 + 4);
	if (!p)
		goto out_err_free;

	p = xdr_decode_hyper(p, &stripe_unit);
	mirror_array_cnt = be32_to_cpup(p++);
	dprintk("%s: stripe_unit=%llu mirror_array_cnt=%u\n", __func__,
		stripe_unit, mirror_array_cnt);

	if (mirror_array_cnt > NFS4_FLEXFILE_LAYOUT_MAX_MIRROR_CNT ||
	    mirror_array_cnt == 0)
		goto out_err_free;

	rc = -ENOMEM;
	fls = kzalloc(sizeof(*fls), gfp_flags);
	if (!fls)
		goto out_err_free;

	fls->mirror_array_cnt = mirror_array_cnt;
	fls->stripe_unit = stripe_unit;
	fls->mirror_array = kcalloc(fls->mirror_array_cnt,
				    sizeof(fls->mirror_array[0]), gfp_flags);
	if (fls->mirror_array == NULL)
		goto out_err_free;

	for (i = 0; i < fls->mirror_array_cnt; i++) {
		struct nfs4_ff_layout_mirror *mirror;
		struct nfs4_deviceid devid;
		struct nfs4_deviceid_node *idnode;
		u32 ds_count;
		u32 fh_count;
		int j;

		rc = -EIO;
		p = xdr_inline_decode(&stream, 4);
		if (!p)
			goto out_err_free;
		ds_count = be32_to_cpup(p);

		/* FIXME: allow for striping? */
		if (ds_count != 1)
			goto out_err_free;

		fls->mirror_array[i] = ff_layout_alloc_mirror(gfp_flags);
		if (fls->mirror_array[i] == NULL) {
			rc = -ENOMEM;
			goto out_err_free;
		}

		fls->mirror_array[i]->ds_count = ds_count;

		/* deviceid */
		rc = decode_deviceid(&stream, &devid);
		if (rc)
			goto out_err_free;

		idnode = nfs4_find_get_deviceid(NFS_SERVER(lh->plh_inode),
						&devid, lh->plh_lc_cred,
						gfp_flags);
		/*
		 * upon success, mirror_ds is allocated by previous
		 * getdeviceinfo, or newly by .alloc_deviceid_node
		 * nfs4_find_get_deviceid failure is indeed getdeviceinfo failure
		 */
		if (idnode)
			fls->mirror_array[i]->mirror_ds =
				FF_LAYOUT_MIRROR_DS(idnode);
		else
			goto out_err_free;

		/* efficiency */
		rc = -EIO;
		p = xdr_inline_decode(&stream, 4);
		if (!p)
			goto out_err_free;
		fls->mirror_array[i]->efficiency = be32_to_cpup(p);

		/* stateid */
		rc = decode_stateid(&stream, &fls->mirror_array[i]->stateid);
		if (rc)
			goto out_err_free;

		/* fh */
		p = xdr_inline_decode(&stream, 4);
		if (!p)
			goto out_err_free;
		fh_count = be32_to_cpup(p);

		fls->mirror_array[i]->fh_versions =
			kzalloc(fh_count * sizeof(struct nfs_fh),
				gfp_flags);
		if (fls->mirror_array[i]->fh_versions == NULL) {
			rc = -ENOMEM;
			goto out_err_free;
		}

		for (j = 0; j < fh_count; j++) {
			rc = decode_nfs_fh(&stream,
					   &fls->mirror_array[i]->fh_versions[j]);
			if (rc)
				goto out_err_free;
		}

		fls->mirror_array[i]->fh_versions_cnt = fh_count;

		/* user */
		rc = decode_name(&stream, &fls->mirror_array[i]->uid);
		if (rc)
			goto out_err_free;

		/* group */
		rc = decode_name(&stream, &fls->mirror_array[i]->gid);
		if (rc)
			goto out_err_free;

		mirror = ff_layout_add_mirror(lh, fls->mirror_array[i]);
		if (mirror != fls->mirror_array[i]) {
			ff_layout_free_mirror(fls->mirror_array[i]);
			fls->mirror_array[i] = mirror;
		}

		dprintk("%s: uid %d gid %d\n", __func__,
			fls->mirror_array[i]->uid,
			fls->mirror_array[i]->gid);
	}

	p = xdr_inline_decode(&stream, 4);
	if (p)
		fls->flags = be32_to_cpup(p);

	ff_layout_sort_mirrors(fls);
	rc = ff_layout_check_layout(lgr);
	if (rc)
		goto out_err_free;

	ret = &fls->generic_hdr;
	dprintk("<-- %s (success)\n", __func__);
out_free_page:
	__free_page(scratch);
	return ret;
out_err_free:
	_ff_layout_free_lseg(fls);
	ret = ERR_PTR(rc);
	dprintk("<-- %s (%d)\n", __func__, rc);
	goto out_free_page;
}

static bool ff_layout_has_rw_segments(struct pnfs_layout_hdr *layout)
{
	struct pnfs_layout_segment *lseg;

	list_for_each_entry(lseg, &layout->plh_segs, pls_list)
		if (lseg->pls_range.iomode == IOMODE_RW)
			return true;

	return false;
}

static void
ff_layout_free_lseg(struct pnfs_layout_segment *lseg)
{
	struct nfs4_ff_layout_segment *fls = FF_LAYOUT_LSEG(lseg);
	int i;

	dprintk("--> %s\n", __func__);

	for (i = 0; i < fls->mirror_array_cnt; i++) {
		if (fls->mirror_array[i]) {
			nfs4_ff_layout_put_deviceid(fls->mirror_array[i]->mirror_ds);
			fls->mirror_array[i]->mirror_ds = NULL;
			if (fls->mirror_array[i]->cred) {
				put_rpccred(fls->mirror_array[i]->cred);
				fls->mirror_array[i]->cred = NULL;
			}
		}
	}

	if (lseg->pls_range.iomode == IOMODE_RW) {
		struct nfs4_flexfile_layout *ffl;
		struct inode *inode;

		ffl = FF_LAYOUT_FROM_HDR(lseg->pls_layout);
		inode = ffl->generic_hdr.plh_inode;
		spin_lock(&inode->i_lock);
		if (!ff_layout_has_rw_segments(lseg->pls_layout)) {
			ffl->commit_info.nbuckets = 0;
			kfree(ffl->commit_info.buckets);
			ffl->commit_info.buckets = NULL;
		}
		spin_unlock(&inode->i_lock);
	}
	_ff_layout_free_lseg(fls);
}

/* Return 1 until we have multiple lsegs support */
static int
ff_layout_get_lseg_count(struct nfs4_ff_layout_segment *fls)
{
	return 1;
}

static void
nfs4_ff_start_busy_timer(struct nfs4_ff_busy_timer *timer, ktime_t now)
{
	/* first IO request? */
	if (atomic_inc_return(&timer->n_ops) == 1) {
		timer->start_time = now;
	}
}

static ktime_t
nfs4_ff_end_busy_timer(struct nfs4_ff_busy_timer *timer, ktime_t now)
{
	ktime_t start;

	if (atomic_dec_return(&timer->n_ops) < 0)
		WARN_ON_ONCE(1);

	start = timer->start_time;
	timer->start_time = now;
	return ktime_sub(now, start);
}

static bool
nfs4_ff_layoutstat_start_io(struct nfs4_ff_layout_mirror *mirror,
			    struct nfs4_ff_layoutstat *layoutstat,
			    ktime_t now)
{
	static const ktime_t notime = {0};
	s64 report_interval = FF_LAYOUTSTATS_REPORT_INTERVAL;

	nfs4_ff_start_busy_timer(&layoutstat->busy_timer, now);
	if (ktime_equal(mirror->start_time, notime))
		mirror->start_time = now;
	if (ktime_equal(mirror->last_report_time, notime))
		mirror->last_report_time = now;
	if (layoutstats_timer != 0)
		report_interval = (s64)layoutstats_timer * 1000LL;
	if (ktime_to_ms(ktime_sub(now, mirror->last_report_time)) >=
			report_interval) {
		mirror->last_report_time = now;
		return true;
	}

	return false;
}

static void
nfs4_ff_layout_stat_io_update_requested(struct nfs4_ff_layoutstat *layoutstat,
		__u64 requested)
{
	struct nfs4_ff_io_stat *iostat = &layoutstat->io_stat;

	iostat->ops_requested++;
	iostat->bytes_requested += requested;
}

static void
nfs4_ff_layout_stat_io_update_completed(struct nfs4_ff_layoutstat *layoutstat,
		__u64 requested,
		__u64 completed,
		ktime_t time_completed,
		ktime_t time_started)
{
	struct nfs4_ff_io_stat *iostat = &layoutstat->io_stat;
	ktime_t completion_time = ktime_sub(time_completed, time_started);
	ktime_t timer;

	iostat->ops_completed++;
	iostat->bytes_completed += completed;
	iostat->bytes_not_delivered += requested - completed;

	timer = nfs4_ff_end_busy_timer(&layoutstat->busy_timer, time_completed);
	iostat->total_busy_time =
		ktime_add(iostat->total_busy_time, timer);
	iostat->aggregate_completion_time =
		ktime_add(iostat->aggregate_completion_time,
			  completion_time);
}

static void
nfs4_ff_layout_stat_io_start_read(struct inode *inode,
		struct nfs4_ff_layout_mirror *mirror,
		__u64 requested, ktime_t now)
{
	bool report;

	spin_lock(&mirror->lock);
	report = nfs4_ff_layoutstat_start_io(mirror, &mirror->read_stat, now);
	nfs4_ff_layout_stat_io_update_requested(&mirror->read_stat, requested);
	spin_unlock(&mirror->lock);

	if (report)
		pnfs_report_layoutstat(inode, GFP_KERNEL);
}

static void
nfs4_ff_layout_stat_io_end_read(struct rpc_task *task,
		struct nfs4_ff_layout_mirror *mirror,
		__u64 requested,
		__u64 completed)
{
	spin_lock(&mirror->lock);
	nfs4_ff_layout_stat_io_update_completed(&mirror->read_stat,
			requested, completed,
			ktime_get(), task->tk_start);
	spin_unlock(&mirror->lock);
}

static void
nfs4_ff_layout_stat_io_start_write(struct inode *inode,
		struct nfs4_ff_layout_mirror *mirror,
		__u64 requested, ktime_t now)
{
	bool report;

	spin_lock(&mirror->lock);
	report = nfs4_ff_layoutstat_start_io(mirror, &mirror->write_stat, now);
	nfs4_ff_layout_stat_io_update_requested(&mirror->write_stat, requested);
	spin_unlock(&mirror->lock);

	if (report)
		pnfs_report_layoutstat(inode, GFP_NOIO);
}

static void
nfs4_ff_layout_stat_io_end_write(struct rpc_task *task,
		struct nfs4_ff_layout_mirror *mirror,
		__u64 requested,
		__u64 completed,
		enum nfs3_stable_how committed)
{
	if (committed == NFS_UNSTABLE)
		requested = completed = 0;

	spin_lock(&mirror->lock);
	nfs4_ff_layout_stat_io_update_completed(&mirror->write_stat,
			requested, completed, ktime_get(), task->tk_start);
	spin_unlock(&mirror->lock);
}

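/*
 * ff_layout_alloc_commit_info() sets up one commit bucket per mirror for
 * the (single) RW layout segment. A racing allocation is handled under
 * cinfo->lock: if another thread installed the buckets first, the local
 * allocation is simply freed.
 */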
static int
ff_layout_alloc_commit_info(struct pnfs_layout_segment *lseg,
			    struct nfs_commit_info *cinfo,
			    gfp_t gfp_flags)
{
	struct nfs4_ff_layout_segment *fls = FF_LAYOUT_LSEG(lseg);
	struct pnfs_commit_bucket *buckets;
	int size;

	if (cinfo->ds->nbuckets != 0) {
		/* This assumes there is only one RW lseg per file.
		 * To support multiple lseg per file, we need to
		 * change struct pnfs_commit_bucket to allow dynamic
		 * increasing nbuckets.
		 */
		return 0;
	}

	size = ff_layout_get_lseg_count(fls) * FF_LAYOUT_MIRROR_COUNT(lseg);

	buckets = kcalloc(size, sizeof(struct pnfs_commit_bucket),
			  gfp_flags);
	if (!buckets)
		return -ENOMEM;
	else {
		int i;

		spin_lock(cinfo->lock);
		if (cinfo->ds->nbuckets != 0)
			kfree(buckets);
		else {
			cinfo->ds->buckets = buckets;
			cinfo->ds->nbuckets = size;
			for (i = 0; i < size; i++) {
				INIT_LIST_HEAD(&buckets[i].written);
				INIT_LIST_HEAD(&buckets[i].committing);
				/* mark direct verifier as unset */
				buckets[i].direct_verf.committed =
					NFS_INVALID_STABLE_HOW;
			}
		}
		spin_unlock(cinfo->lock);
		return 0;
	}
}

static struct nfs4_pnfs_ds *
ff_layout_choose_best_ds_for_read(struct nfs_pageio_descriptor *pgio,
				  int *best_idx)
{
	struct nfs4_ff_layout_segment *fls;
	struct nfs4_pnfs_ds *ds;
	int idx;

	fls = FF_LAYOUT_LSEG(pgio->pg_lseg);
	/* mirrors are sorted by efficiency */
	for (idx = 0; idx < fls->mirror_array_cnt; idx++) {
		ds = nfs4_ff_layout_prepare_ds(pgio->pg_lseg, idx, false);
		if (ds) {
			*best_idx = idx;
			return ds;
		}
	}

	return NULL;
}

static void
ff_layout_pg_init_read(struct nfs_pageio_descriptor *pgio,
			struct nfs_page *req)
{
	struct nfs_pgio_mirror *pgm;
	struct nfs4_ff_layout_mirror *mirror;
	struct nfs4_pnfs_ds *ds;
	int ds_idx;

	/* Use full layout for now */
	if (!pgio->pg_lseg)
		pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
						   req->wb_context,
						   0,
						   NFS4_MAX_UINT64,
						   IOMODE_READ,
						   GFP_KERNEL);
	/* If no lseg, fall back to read through mds */
	if (pgio->pg_lseg == NULL)
		goto out_mds;

	ds = ff_layout_choose_best_ds_for_read(pgio, &ds_idx);
	if (!ds)
		goto out_mds;
	mirror = FF_LAYOUT_COMP(pgio->pg_lseg, ds_idx);

	pgio->pg_mirror_idx = ds_idx;

	/* read always uses only one mirror - idx 0 for pgio layer */
	pgm = &pgio->pg_mirrors[0];
	pgm->pg_bsize = mirror->mirror_ds->ds_versions[0].rsize;

	return;
out_mds:
	pnfs_put_lseg(pgio->pg_lseg);
	pgio->pg_lseg = NULL;
	nfs_pageio_reset_read_mds(pgio);
}

static void
ff_layout_pg_init_write(struct nfs_pageio_descriptor *pgio,
			struct nfs_page *req)
{
	struct nfs4_ff_layout_mirror *mirror;
	struct nfs_pgio_mirror *pgm;
	struct nfs_commit_info cinfo;
	struct nfs4_pnfs_ds *ds;
	int i;
	int status;

	if (!pgio->pg_lseg)
		pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
						   req->wb_context,
						   0,
						   NFS4_MAX_UINT64,
						   IOMODE_RW,
						   GFP_NOFS);
	/* If no lseg, fall back to write through mds */
	if (pgio->pg_lseg == NULL)
		goto out_mds;

	nfs_init_cinfo(&cinfo, pgio->pg_inode, pgio->pg_dreq);
	status = ff_layout_alloc_commit_info(pgio->pg_lseg, &cinfo, GFP_NOFS);
	if (status < 0)
		goto out_mds;

	/* Use a direct mapping of ds_idx to pgio mirror_idx */
	if (WARN_ON_ONCE(pgio->pg_mirror_count !=
	    FF_LAYOUT_MIRROR_COUNT(pgio->pg_lseg)))
		goto out_mds;

	for (i = 0; i < pgio->pg_mirror_count; i++) {
		ds = nfs4_ff_layout_prepare_ds(pgio->pg_lseg, i, true);
		if (!ds)
			goto out_mds;
		pgm = &pgio->pg_mirrors[i];
		mirror = FF_LAYOUT_COMP(pgio->pg_lseg, i);
		pgm->pg_bsize = mirror->mirror_ds->ds_versions[0].wsize;
	}

	return;
out_mds:
	pnfs_put_lseg(pgio->pg_lseg);
	pgio->pg_lseg = NULL;
	nfs_pageio_reset_write_mds(pgio);
}

static unsigned int
ff_layout_pg_get_mirror_count_write(struct nfs_pageio_descriptor *pgio,
				    struct nfs_page *req)
{
	if (!pgio->pg_lseg)
		pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
						   req->wb_context,
						   0,
						   NFS4_MAX_UINT64,
						   IOMODE_RW,
						   GFP_NOFS);
	if (pgio->pg_lseg)
		return FF_LAYOUT_MIRROR_COUNT(pgio->pg_lseg);

	/* no lseg means that pnfs is not in use, so no mirroring here */
	nfs_pageio_reset_write_mds(pgio);
	return 1;
}

static const struct nfs_pageio_ops ff_layout_pg_read_ops = {
	.pg_init = ff_layout_pg_init_read,
	.pg_test = pnfs_generic_pg_test,
	.pg_doio = pnfs_generic_pg_readpages,
	.pg_cleanup = pnfs_generic_pg_cleanup,
};

static const struct nfs_pageio_ops ff_layout_pg_write_ops = {
	.pg_init = ff_layout_pg_init_write,
	.pg_test = pnfs_generic_pg_test,
	.pg_doio = pnfs_generic_pg_writepages,
	.pg_get_mirror_count = ff_layout_pg_get_mirror_count_write,
	.pg_cleanup = pnfs_generic_pg_cleanup,
};

static void ff_layout_reset_write(struct nfs_pgio_header *hdr, bool retry_pnfs)
{
	struct rpc_task *task = &hdr->task;

	pnfs_layoutcommit_inode(hdr->inode, false);

	if (retry_pnfs) {
		dprintk("%s Reset task %5u for i/o through pNFS "
			"(req %s/%llu, %u bytes @ offset %llu)\n", __func__,
			hdr->task.tk_pid,
			hdr->inode->i_sb->s_id,
			(unsigned long long)NFS_FILEID(hdr->inode),
			hdr->args.count,
			(unsigned long long)hdr->args.offset);

		if (!hdr->dreq) {
			struct nfs_open_context *ctx;

			ctx = nfs_list_entry(hdr->pages.next)->wb_context;
			set_bit(NFS_CONTEXT_RESEND_WRITES, &ctx->flags);
			hdr->completion_ops->error_cleanup(&hdr->pages);
		} else {
			nfs_direct_set_resched_writes(hdr->dreq);
			/* fake unstable write to let common nfs resend pages */
			hdr->verf.committed = NFS_UNSTABLE;
			hdr->good_bytes = hdr->args.count;
		}
		return;
	}

	if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
		dprintk("%s Reset task %5u for i/o through MDS "
			"(req %s/%llu, %u bytes @ offset %llu)\n", __func__,
			hdr->task.tk_pid,
			hdr->inode->i_sb->s_id,
			(unsigned long long)NFS_FILEID(hdr->inode),
			hdr->args.count,
			(unsigned long long)hdr->args.offset);

		task->tk_status = pnfs_write_done_resend_to_mds(hdr);
	}
}

static void ff_layout_reset_read(struct nfs_pgio_header *hdr)
{
	struct rpc_task *task = &hdr->task;

	pnfs_layoutcommit_inode(hdr->inode, false);

	if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
		dprintk("%s Reset task %5u for i/o through MDS "
			"(req %s/%llu, %u bytes @ offset %llu)\n", __func__,
			hdr->task.tk_pid,
			hdr->inode->i_sb->s_id,
			(unsigned long long)NFS_FILEID(hdr->inode),
			hdr->args.count,
			(unsigned long long)hdr->args.offset);

		task->tk_status = pnfs_read_done_resend_to_mds(hdr);
	}
}

static int ff_layout_async_handle_error_v4(struct rpc_task *task,
					    struct nfs4_state *state,
					    struct nfs_client *clp,
					    struct pnfs_layout_segment *lseg,
					    int idx)
{
	struct pnfs_layout_hdr *lo = lseg->pls_layout;
	struct inode *inode = lo->plh_inode;
	struct nfs_server *mds_server = NFS_SERVER(inode);

	struct nfs4_deviceid_node *devid = FF_LAYOUT_DEVID_NODE(lseg, idx);
	struct nfs_client *mds_client = mds_server->nfs_client;
	struct nfs4_slot_table *tbl = &clp->cl_session->fc_slot_table;

	if (task->tk_status >= 0)
		return 0;

	switch (task->tk_status) {
	/* MDS state errors */
	case -NFS4ERR_DELEG_REVOKED:
	case -NFS4ERR_ADMIN_REVOKED:
	case -NFS4ERR_BAD_STATEID:
		if (state == NULL)
			break;
		nfs_remove_bad_delegation(state->inode);
	case -NFS4ERR_OPENMODE:
		if (state == NULL)
			break;
		if (nfs4_schedule_stateid_recovery(mds_server, state) < 0)
			goto out_bad_stateid;
		goto wait_on_recovery;
	case -NFS4ERR_EXPIRED:
		if (state != NULL) {
			if (nfs4_schedule_stateid_recovery(mds_server, state) < 0)
				goto out_bad_stateid;
		}
		nfs4_schedule_lease_recovery(mds_client);
		goto wait_on_recovery;
	/* DS session errors */
	case -NFS4ERR_BADSESSION:
	case -NFS4ERR_BADSLOT:
	case -NFS4ERR_BAD_HIGH_SLOT:
	case -NFS4ERR_DEADSESSION:
	case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
	case -NFS4ERR_SEQ_FALSE_RETRY:
	case -NFS4ERR_SEQ_MISORDERED:
		dprintk("%s ERROR %d, Reset session. Exchangeid "
			"flags 0x%x\n", __func__, task->tk_status,
			clp->cl_exchange_flags);
		nfs4_schedule_session_recovery(clp->cl_session, task->tk_status);
		break;
	case -NFS4ERR_DELAY:
	case -NFS4ERR_GRACE:
		rpc_delay(task, FF_LAYOUT_POLL_RETRY_MAX);
		break;
	case -NFS4ERR_RETRY_UNCACHED_REP:
		break;
	/* Invalidate Layout errors */
	case -NFS4ERR_PNFS_NO_LAYOUT:
	case -ESTALE:           /* mapped NFS4ERR_STALE */
	case -EBADHANDLE:       /* mapped NFS4ERR_BADHANDLE */
	case -EISDIR:           /* mapped NFS4ERR_ISDIR */
	case -NFS4ERR_FHEXPIRED:
	case -NFS4ERR_WRONG_TYPE:
		dprintk("%s Invalid layout error %d\n", __func__,
			task->tk_status);
		/*
		 * Destroy layout so new i/o will get a new layout.
		 * Layout will not be destroyed until all current lseg
		 * references are put. Mark layout as invalid to resend failed
		 * i/o and all i/o waiting on the slot table to the MDS until
		 * layout is destroyed and a new valid layout is obtained.
		 */
		pnfs_destroy_layout(NFS_I(inode));
		rpc_wake_up(&tbl->slot_tbl_waitq);
		goto reset;
	/* RPC connection errors */
	case -ECONNREFUSED:
	case -EHOSTDOWN:
	case -EHOSTUNREACH:
	case -ENETUNREACH:
	case -EIO:
	case -ETIMEDOUT:
	case -EPIPE:
		dprintk("%s DS connection error %d\n", __func__,
			task->tk_status);
		nfs4_mark_deviceid_unavailable(devid);
		rpc_wake_up(&tbl->slot_tbl_waitq);
		/* fall through */
	default:
		if (ff_layout_has_available_ds(lseg))
			return -NFS4ERR_RESET_TO_PNFS;
reset:
		dprintk("%s Retry through MDS. Error %d\n", __func__,
			task->tk_status);
		return -NFS4ERR_RESET_TO_MDS;
	}
out:
	task->tk_status = 0;
	return -EAGAIN;
out_bad_stateid:
	task->tk_status = -EIO;
	return 0;
wait_on_recovery:
	rpc_sleep_on(&mds_client->cl_rpcwaitq, task, NULL);
	if (test_bit(NFS4CLNT_MANAGER_RUNNING, &mds_client->cl_state) == 0)
		rpc_wake_up_queued_task(&mds_client->cl_rpcwaitq, task);
	goto out;
}

/* Retry all errors through either pNFS or MDS except for -EJUKEBOX */
static int ff_layout_async_handle_error_v3(struct rpc_task *task,
					    struct pnfs_layout_segment *lseg,
					    int idx)
{
	struct nfs4_deviceid_node *devid = FF_LAYOUT_DEVID_NODE(lseg, idx);

	if (task->tk_status >= 0)
		return 0;

	if (task->tk_status != -EJUKEBOX) {
		dprintk("%s DS connection error %d\n", __func__,
			task->tk_status);
		nfs4_mark_deviceid_unavailable(devid);
		if (ff_layout_has_available_ds(lseg))
			return -NFS4ERR_RESET_TO_PNFS;
		else
			return -NFS4ERR_RESET_TO_MDS;
	}

	if (task->tk_status == -EJUKEBOX)
		nfs_inc_stats(lseg->pls_layout->plh_inode, NFSIOS_DELAY);
	task->tk_status = 0;
	rpc_restart_call(task);
	rpc_delay(task, NFS_JUKEBOX_RETRY_TIME);
	return -EAGAIN;
}

static int ff_layout_async_handle_error(struct rpc_task *task,
					struct nfs4_state *state,
					struct nfs_client *clp,
					struct pnfs_layout_segment *lseg,
					int idx)
{
	int vers = clp->cl_nfs_mod->rpc_vers->number;

	switch (vers) {
	case 3:
		return ff_layout_async_handle_error_v3(task, lseg, idx);
	case 4:
		return ff_layout_async_handle_error_v4(task, state, clp,
						       lseg, idx);
	default:
		/* should never happen */
		WARN_ON_ONCE(1);
		return 0;
	}
}

static void ff_layout_io_track_ds_error(struct pnfs_layout_segment *lseg,
					int idx, u64 offset, u64 length,
					u32 status, int opnum, int error)
{
	struct nfs4_ff_layout_mirror *mirror;
	int err;

	if (status == 0) {
		switch (error) {
		case -ETIMEDOUT:
		case -EPFNOSUPPORT:
		case -EPROTONOSUPPORT:
		case -EOPNOTSUPP:
		case -ECONNREFUSED:
		case -ECONNRESET:
		case -EHOSTDOWN:
		case -EHOSTUNREACH:
		case -ENETUNREACH:
		case -EADDRINUSE:
		case -ENOBUFS:
		case -EPIPE:
		case -EPERM:
			status = NFS4ERR_NXIO;
			break;
		case -EACCES:
			status = NFS4ERR_ACCESS;
			break;
		default:
			return;
		}
	}

	mirror = FF_LAYOUT_COMP(lseg, idx);
	err = ff_layout_track_ds_error(FF_LAYOUT_FROM_HDR(lseg->pls_layout),
				       mirror, offset, length, status, opnum,
				       GFP_NOIO);
	dprintk("%s: err %d op %d status %u\n", __func__, err, opnum, status);
}

/* NFS_PROTO call done callback routines */

static int ff_layout_read_done_cb(struct rpc_task *task,
				struct nfs_pgio_header *hdr)
{
	struct inode *inode;
	int err;

	trace_nfs4_pnfs_read(hdr, task->tk_status);
	if (task->tk_status < 0)
		ff_layout_io_track_ds_error(hdr->lseg, hdr->pgio_mirror_idx,
					    hdr->args.offset, hdr->args.count,
					    hdr->res.op_status, OP_READ,
					    task->tk_status);
	err = ff_layout_async_handle_error(task, hdr->args.context->state,
					   hdr->ds_clp, hdr->lseg,
					   hdr->pgio_mirror_idx);

	switch (err) {
	case -NFS4ERR_RESET_TO_PNFS:
		set_bit(NFS_LAYOUT_RETURN_BEFORE_CLOSE,
			&hdr->lseg->pls_layout->plh_flags);
		pnfs_read_resend_pnfs(hdr);
		return task->tk_status;
	case -NFS4ERR_RESET_TO_MDS:
		inode = hdr->lseg->pls_layout->plh_inode;
		pnfs_error_mark_layout_for_return(inode, hdr->lseg);
		ff_layout_reset_read(hdr);
		return task->tk_status;
	case -EAGAIN:
		rpc_restart_call_prepare(task);
		return -EAGAIN;
	}

	return 0;
}

static bool
ff_layout_need_layoutcommit(struct pnfs_layout_segment *lseg)
{
	return !(FF_LAYOUT_LSEG(lseg)->flags & FF_FLAGS_NO_LAYOUTCOMMIT);
}

/*
 * We reference the rpc_cred of the first WRITE that triggers the need for
 * a LAYOUTCOMMIT, and use it to send the layoutcommit compound.
 * rfc5661 is not clear about which credential should be used.
 *
 * Flexlayout client should treat DS replied FILE_SYNC as DATA_SYNC, so
 * to follow http://www.rfc-editor.org/errata_search.php?rfc=5661&eid=2751
 * we always send layoutcommit after DS writes.
 */
static void
ff_layout_set_layoutcommit(struct nfs_pgio_header *hdr)
{
	if (!ff_layout_need_layoutcommit(hdr->lseg))
		return;

	pnfs_set_layoutcommit(hdr->inode, hdr->lseg,
			      hdr->mds_offset + hdr->res.count);
	dprintk("%s inode %lu pls_end_pos %lu\n", __func__, hdr->inode->i_ino,
		(unsigned long) NFS_I(hdr->inode)->layout->plh_lwb);
}

static bool
ff_layout_reset_to_mds(struct pnfs_layout_segment *lseg, int idx)
{
	/* No mirroring for now */
	struct nfs4_deviceid_node *node = FF_LAYOUT_DEVID_NODE(lseg, idx);

	return ff_layout_test_devid_unavailable(node);
}

static int ff_layout_read_prepare_common(struct rpc_task *task,
					 struct nfs_pgio_header *hdr)
{
	nfs4_ff_layout_stat_io_start_read(hdr->inode,
			FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx),
			hdr->args.count,
			task->tk_start);

	if (unlikely(test_bit(NFS_CONTEXT_BAD, &hdr->args.context->flags))) {
		rpc_exit(task, -EIO);
		return -EIO;
	}
	if (ff_layout_reset_to_mds(hdr->lseg, hdr->pgio_mirror_idx)) {
		dprintk("%s task %u reset io to MDS\n", __func__, task->tk_pid);
		if (ff_layout_has_available_ds(hdr->lseg))
			pnfs_read_resend_pnfs(hdr);
		else
			ff_layout_reset_read(hdr);
		rpc_exit(task, 0);
		return -EAGAIN;
	}
	hdr->pgio_done_cb = ff_layout_read_done_cb;

	return 0;
}

/*
 * Call ops for the async read/write cases
 * In the case of dense layouts, the offset needs to be reset to its
 * original value.
 */
static void ff_layout_read_prepare_v3(struct rpc_task *task, void *data)
{
	struct nfs_pgio_header *hdr = data;

	if (ff_layout_read_prepare_common(task, hdr))
		return;

	rpc_call_start(task);
}

static int ff_layout_setup_sequence(struct nfs_client *ds_clp,
				    struct nfs4_sequence_args *args,
				    struct nfs4_sequence_res *res,
				    struct rpc_task *task)
{
	if (ds_clp->cl_session)
		return nfs41_setup_sequence(ds_clp->cl_session,
					   args,
					   res,
					   task);
	return nfs40_setup_sequence(ds_clp->cl_slot_tbl,
				   args,
				   res,
				   task);
}

static void ff_layout_read_prepare_v4(struct rpc_task *task, void *data)
{
	struct nfs_pgio_header *hdr = data;

	if (ff_layout_setup_sequence(hdr->ds_clp,
				     &hdr->args.seq_args,
				     &hdr->res.seq_res,
				     task))
		return;

	if (ff_layout_read_prepare_common(task, hdr))
		return;

	if (nfs4_set_rw_stateid(&hdr->args.stateid, hdr->args.context,
			hdr->args.lock_context, FMODE_READ) == -EIO)
		rpc_exit(task, -EIO); /* lost lock, terminate I/O */
}

static void ff_layout_read_call_done(struct rpc_task *task, void *data)
{
	struct nfs_pgio_header *hdr = data;

	dprintk("--> %s task->tk_status %d\n", __func__, task->tk_status);

	nfs4_ff_layout_stat_io_end_read(task,
			FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx),
			hdr->args.count, hdr->res.count);

	if (test_bit(NFS_IOHDR_REDO, &hdr->flags) &&
	    task->tk_status == 0) {
		nfs4_sequence_done(task, &hdr->res.seq_res);
		return;
	}

	/* Note this may cause RPC to be resent */
	hdr->mds_ops->rpc_call_done(task, hdr);
}

static void ff_layout_read_count_stats(struct rpc_task *task, void *data)
{
	struct nfs_pgio_header *hdr = data;

	rpc_count_iostats_metrics(task,
	    &NFS_CLIENT(hdr->inode)->cl_metrics[NFSPROC4_CLNT_READ]);
}

static int ff_layout_write_done_cb(struct rpc_task *task,
				   struct nfs_pgio_header *hdr)
{
	struct inode *inode;
	int err;

	trace_nfs4_pnfs_write(hdr, task->tk_status);
	if (task->tk_status < 0)
		ff_layout_io_track_ds_error(hdr->lseg, hdr->pgio_mirror_idx,
					    hdr->args.offset, hdr->args.count,
					    hdr->res.op_status, OP_WRITE,
					    task->tk_status);
	err = ff_layout_async_handle_error(task, hdr->args.context->state,
					   hdr->ds_clp, hdr->lseg,
					   hdr->pgio_mirror_idx);

	switch (err) {
	case -NFS4ERR_RESET_TO_PNFS:
	case -NFS4ERR_RESET_TO_MDS:
		inode = hdr->lseg->pls_layout->plh_inode;
		pnfs_error_mark_layout_for_return(inode, hdr->lseg);
		if (err == -NFS4ERR_RESET_TO_PNFS) {
			pnfs_set_retry_layoutget(hdr->lseg->pls_layout);
			ff_layout_reset_write(hdr, true);
		} else {
			pnfs_clear_retry_layoutget(hdr->lseg->pls_layout);
			ff_layout_reset_write(hdr, false);
		}
		return task->tk_status;
	case -EAGAIN:
		rpc_restart_call_prepare(task);
		return -EAGAIN;
	}

	if (hdr->res.verf->committed == NFS_FILE_SYNC ||
	    hdr->res.verf->committed == NFS_DATA_SYNC)
		ff_layout_set_layoutcommit(hdr);

	/* zero out fattr since we don't care about DS attrs at all */
	hdr->fattr.valid = 0;
	if (task->tk_status >= 0)
		nfs_writeback_update_inode(hdr);

	return 0;
}

static int ff_layout_commit_done_cb(struct rpc_task *task,
				    struct nfs_commit_data *data)
{
	struct inode *inode;
	int err;

	trace_nfs4_pnfs_commit_ds(data, task->tk_status);
	if (task->tk_status < 0)
		ff_layout_io_track_ds_error(data->lseg, data->ds_commit_index,
					    data->args.offset, data->args.count,
					    data->res.op_status, OP_COMMIT,
					    task->tk_status);
	err = ff_layout_async_handle_error(task, NULL, data->ds_clp,
					   data->lseg, data->ds_commit_index);

	switch (err) {
	case -NFS4ERR_RESET_TO_PNFS:
	case -NFS4ERR_RESET_TO_MDS:
		inode = data->lseg->pls_layout->plh_inode;
		pnfs_error_mark_layout_for_return(inode, data->lseg);
		if (err == -NFS4ERR_RESET_TO_PNFS)
			pnfs_set_retry_layoutget(data->lseg->pls_layout);
		else
			pnfs_clear_retry_layoutget(data->lseg->pls_layout);
		pnfs_generic_prepare_to_resend_writes(data);
		return -EAGAIN;
	case -EAGAIN:
		rpc_restart_call_prepare(task);
		return -EAGAIN;
	}

	if (data->verf.committed == NFS_UNSTABLE
	    && ff_layout_need_layoutcommit(data->lseg))
		pnfs_set_layoutcommit(data->inode, data->lseg, data->lwb);

	return 0;
}

static int ff_layout_write_prepare_common(struct rpc_task *task,
					  struct nfs_pgio_header *hdr)
{
	nfs4_ff_layout_stat_io_start_write(hdr->inode,
			FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx),
			hdr->args.count,
			task->tk_start);

	if (unlikely(test_bit(NFS_CONTEXT_BAD, &hdr->args.context->flags))) {
		rpc_exit(task, -EIO);
		return -EIO;
	}

	if (ff_layout_reset_to_mds(hdr->lseg, hdr->pgio_mirror_idx)) {
		bool retry_pnfs;

		retry_pnfs = ff_layout_has_available_ds(hdr->lseg);
		dprintk("%s task %u reset io to %s\n", __func__,
			task->tk_pid, retry_pnfs ? "pNFS" : "MDS");
		ff_layout_reset_write(hdr, retry_pnfs);
		rpc_exit(task, 0);
		return -EAGAIN;
	}

	return 0;
}

static void ff_layout_write_prepare_v3(struct rpc_task *task, void *data)
{
	struct nfs_pgio_header *hdr = data;

	if (ff_layout_write_prepare_common(task, hdr))
		return;

	rpc_call_start(task);
}

static void ff_layout_write_prepare_v4(struct rpc_task *task, void *data)
{
	struct nfs_pgio_header *hdr = data;

	if (ff_layout_setup_sequence(hdr->ds_clp,
				     &hdr->args.seq_args,
				     &hdr->res.seq_res,
				     task))
		return;

	if (ff_layout_write_prepare_common(task, hdr))
		return;

	if (nfs4_set_rw_stateid(&hdr->args.stateid, hdr->args.context,
			hdr->args.lock_context, FMODE_WRITE) == -EIO)
		rpc_exit(task, -EIO); /* lost lock, terminate I/O */
}

static void ff_layout_write_call_done(struct rpc_task *task, void *data)
{
	struct nfs_pgio_header *hdr = data;

	nfs4_ff_layout_stat_io_end_write(task,
			FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx),
			hdr->args.count, hdr->res.count,
			hdr->res.verf->committed);

	if (test_bit(NFS_IOHDR_REDO, &hdr->flags) &&
	    task->tk_status == 0) {
		nfs4_sequence_done(task, &hdr->res.seq_res);
		return;
	}

	/* Note this may cause RPC to be resent */
	hdr->mds_ops->rpc_call_done(task, hdr);
}

static void ff_layout_write_count_stats(struct rpc_task *task, void *data)
{
	struct nfs_pgio_header *hdr = data;

	rpc_count_iostats_metrics(task,
	    &NFS_CLIENT(hdr->inode)->cl_metrics[NFSPROC4_CLNT_WRITE]);
}

static void ff_layout_commit_prepare_common(struct rpc_task *task,
		struct nfs_commit_data *cdata)
{
	nfs4_ff_layout_stat_io_start_write(cdata->inode,
			FF_LAYOUT_COMP(cdata->lseg, cdata->ds_commit_index),
			0, task->tk_start);
}

static void ff_layout_commit_prepare_v3(struct rpc_task *task, void *data)
{
	ff_layout_commit_prepare_common(task, data);
	rpc_call_start(task);
}

static void ff_layout_commit_prepare_v4(struct rpc_task *task, void *data)
{
	struct nfs_commit_data *wdata = data;

	if (ff_layout_setup_sequence(wdata->ds_clp,
				     &wdata->args.seq_args,
				     &wdata->res.seq_res,
				     task))
		return;
	ff_layout_commit_prepare_common(task, data);
}

static void ff_layout_commit_done(struct rpc_task *task, void *data)
{
	struct nfs_commit_data *cdata = data;
	struct nfs_page *req;
	__u64 count = 0;

	if (task->tk_status == 0) {
		list_for_each_entry(req, &cdata->pages, wb_list)
			count += req->wb_bytes;
	}

	nfs4_ff_layout_stat_io_end_write(task,
			FF_LAYOUT_COMP(cdata->lseg, cdata->ds_commit_index),
			count, count, NFS_FILE_SYNC);

	pnfs_generic_write_commit_done(task, data);
}

static void ff_layout_commit_count_stats(struct rpc_task *task, void *data)
{
	struct nfs_commit_data *cdata = data;

	rpc_count_iostats_metrics(task,
	    &NFS_CLIENT(cdata->inode)->cl_metrics[NFSPROC4_CLNT_COMMIT]);
}

static const struct rpc_call_ops ff_layout_read_call_ops_v3 = {
	.rpc_call_prepare = ff_layout_read_prepare_v3,
	.rpc_call_done = ff_layout_read_call_done,
	.rpc_count_stats = ff_layout_read_count_stats,
	.rpc_release = pnfs_generic_rw_release,
};

static const struct rpc_call_ops ff_layout_read_call_ops_v4 = {
	.rpc_call_prepare = ff_layout_read_prepare_v4,
	.rpc_call_done = ff_layout_read_call_done,
	.rpc_count_stats = ff_layout_read_count_stats,
	.rpc_release = pnfs_generic_rw_release,
};

static const struct rpc_call_ops ff_layout_write_call_ops_v3 = {
	.rpc_call_prepare = ff_layout_write_prepare_v3,
	.rpc_call_done = ff_layout_write_call_done,
	.rpc_count_stats = ff_layout_write_count_stats,
	.rpc_release = pnfs_generic_rw_release,
};

static const struct rpc_call_ops ff_layout_write_call_ops_v4 = {
	.rpc_call_prepare = ff_layout_write_prepare_v4,
	.rpc_call_done = ff_layout_write_call_done,
	.rpc_count_stats = ff_layout_write_count_stats,
	.rpc_release = pnfs_generic_rw_release,
};

static const struct rpc_call_ops ff_layout_commit_call_ops_v3 = {
	.rpc_call_prepare = ff_layout_commit_prepare_v3,
	.rpc_call_done = ff_layout_commit_done,
	.rpc_count_stats = ff_layout_commit_count_stats,
	.rpc_release = pnfs_generic_commit_release,
};

static const struct rpc_call_ops ff_layout_commit_call_ops_v4 = {
	.rpc_call_prepare = ff_layout_commit_prepare_v4,
	.rpc_call_done = ff_layout_commit_done,
	.rpc_count_stats = ff_layout_commit_count_stats,
	.rpc_release = pnfs_generic_commit_release,
};

static enum pnfs_try_status
ff_layout_read_pagelist(struct nfs_pgio_header *hdr)
{
	struct pnfs_layout_segment *lseg = hdr->lseg;
	struct nfs4_pnfs_ds *ds;
	struct rpc_clnt *ds_clnt;
	struct rpc_cred *ds_cred;
	loff_t offset = hdr->args.offset;
	u32 idx = hdr->pgio_mirror_idx;
	int vers;
	struct nfs_fh *fh;

	dprintk("--> %s ino %lu pgbase %u req %Zu@%llu\n",
		__func__, hdr->inode->i_ino,
		hdr->args.pgbase, (size_t)hdr->args.count, offset);

	ds = nfs4_ff_layout_prepare_ds(lseg, idx, false);
	if (!ds)
		goto out_failed;

	ds_clnt = nfs4_ff_find_or_create_ds_client(lseg, idx, ds->ds_clp,
						   hdr->inode);
	if (IS_ERR(ds_clnt))
		goto out_failed;

	ds_cred = ff_layout_get_ds_cred(lseg, idx, hdr->cred);
	if (IS_ERR(ds_cred))
		goto out_failed;

	vers = nfs4_ff_layout_ds_version(lseg, idx);

	dprintk("%s USE DS: %s cl_count %d vers %d\n", __func__,
		ds->ds_remotestr, atomic_read(&ds->ds_clp->cl_count), vers);

	atomic_inc(&ds->ds_clp->cl_count);
	hdr->ds_clp = ds->ds_clp;
	fh = nfs4_ff_layout_select_ds_fh(lseg, idx);
	if (fh)
		hdr->args.fh = fh;
	/*
	 * Note that if we ever decide to split across DSes,
	 * then we may need to handle dense-like offsets.
	 */
	hdr->args.offset = offset;
	hdr->mds_offset = offset;

	/* Perform an asynchronous read to ds */
	nfs_initiate_pgio(ds_clnt, hdr, ds_cred, ds->ds_clp->rpc_ops,
			  vers == 3 ? &ff_layout_read_call_ops_v3 :
				      &ff_layout_read_call_ops_v4,
			  0, RPC_TASK_SOFTCONN);

	return PNFS_ATTEMPTED;

out_failed:
	if (ff_layout_has_available_ds(lseg))
		return PNFS_TRY_AGAIN;
	return PNFS_NOT_ATTEMPTED;
}

/* Perform async writes. */
static enum pnfs_try_status
ff_layout_write_pagelist(struct nfs_pgio_header *hdr, int sync)
{
	struct pnfs_layout_segment *lseg = hdr->lseg;
	struct nfs4_pnfs_ds *ds;
	struct rpc_clnt *ds_clnt;
	struct rpc_cred *ds_cred;
	loff_t offset = hdr->args.offset;
	int vers;
	struct nfs_fh *fh;
	int idx = hdr->pgio_mirror_idx;

	ds = nfs4_ff_layout_prepare_ds(lseg, idx, true);
	if (!ds)
		return PNFS_NOT_ATTEMPTED;

	ds_clnt = nfs4_ff_find_or_create_ds_client(lseg, idx, ds->ds_clp,
						   hdr->inode);
	if (IS_ERR(ds_clnt))
		return PNFS_NOT_ATTEMPTED;

	ds_cred = ff_layout_get_ds_cred(lseg, idx, hdr->cred);
	if (IS_ERR(ds_cred))
		return PNFS_NOT_ATTEMPTED;

	vers = nfs4_ff_layout_ds_version(lseg, idx);

	dprintk("%s ino %lu sync %d req %Zu@%llu DS: %s cl_count %d vers %d\n",
		__func__, hdr->inode->i_ino, sync, (size_t) hdr->args.count,
		offset, ds->ds_remotestr, atomic_read(&ds->ds_clp->cl_count),
		vers);

	hdr->pgio_done_cb = ff_layout_write_done_cb;
	atomic_inc(&ds->ds_clp->cl_count);
	hdr->ds_clp = ds->ds_clp;
	hdr->ds_commit_idx = idx;
	fh = nfs4_ff_layout_select_ds_fh(lseg, idx);
	if (fh)
		hdr->args.fh = fh;

	/*
	 * Note that if we ever decide to split across DSes,
	 * then we may need to handle dense-like offsets.
	 */
	hdr->args.offset = offset;

	/* Perform an asynchronous write */
	nfs_initiate_pgio(ds_clnt, hdr, ds_cred, ds->ds_clp->rpc_ops,
			  vers == 3 ? &ff_layout_write_call_ops_v3 :
				      &ff_layout_write_call_ops_v4,
			  sync, RPC_TASK_SOFTCONN);
	return PNFS_ATTEMPTED;
}

static u32 calc_ds_index_from_commit(struct pnfs_layout_segment *lseg, u32 i)
{
	return i;
}

static struct nfs_fh *
select_ds_fh_from_commit(struct pnfs_layout_segment *lseg, u32 i)
{
	struct nfs4_ff_layout_segment *flseg = FF_LAYOUT_LSEG(lseg);

	/* FIXME: Assume that there is only one NFS version available
	 * for the DS.
	 */
	return &flseg->mirror_array[i]->fh_versions[0];
}

static int ff_layout_initiate_commit(struct nfs_commit_data *data, int how)
{
	struct pnfs_layout_segment *lseg = data->lseg;
	struct nfs4_pnfs_ds *ds;
	struct rpc_clnt *ds_clnt;
	struct rpc_cred *ds_cred;
	u32 idx;
	int vers;
	struct nfs_fh *fh;

	idx = calc_ds_index_from_commit(lseg, data->ds_commit_index);
	ds = nfs4_ff_layout_prepare_ds(lseg, idx, true);
	if (!ds)
		goto out_err;

	ds_clnt = nfs4_ff_find_or_create_ds_client(lseg, idx, ds->ds_clp,
						   data->inode);
	if (IS_ERR(ds_clnt))
		goto out_err;

	ds_cred = ff_layout_get_ds_cred(lseg, idx, data->cred);
	if (IS_ERR(ds_cred))
		goto out_err;

	vers = nfs4_ff_layout_ds_version(lseg, idx);

	dprintk("%s ino %lu, how %d cl_count %d vers %d\n", __func__,
		data->inode->i_ino, how, atomic_read(&ds->ds_clp->cl_count),
		vers);
	data->commit_done_cb = ff_layout_commit_done_cb;
	data->cred = ds_cred;
	atomic_inc(&ds->ds_clp->cl_count);
	data->ds_clp = ds->ds_clp;
	fh = select_ds_fh_from_commit(lseg, data->ds_commit_index);
	if (fh)
		data->args.fh = fh;

	return nfs_initiate_commit(ds_clnt, data, ds->ds_clp->rpc_ops,
				   vers == 3 ? &ff_layout_commit_call_ops_v3 :
					       &ff_layout_commit_call_ops_v4,
				   how, RPC_TASK_SOFTCONN);
out_err:
	pnfs_generic_prepare_to_resend_writes(data);
	pnfs_generic_commit_release(data);
	return -EAGAIN;
}

static int
ff_layout_commit_pagelist(struct inode *inode, struct list_head *mds_pages,
			   int how, struct nfs_commit_info *cinfo)
{
	return pnfs_generic_commit_pagelist(inode, mds_pages, how, cinfo,
					    ff_layout_initiate_commit);
}

static struct pnfs_ds_commit_info *
ff_layout_get_ds_info(struct inode *inode)
{
	struct pnfs_layout_hdr *layout = NFS_I(inode)->layout;

	if (layout == NULL)
		return NULL;

	return &FF_LAYOUT_FROM_HDR(layout)->commit_info;
}

static void
ff_layout_free_deviceid_node(struct nfs4_deviceid_node *d)
{
	nfs4_ff_layout_free_deviceid(container_of(d, struct nfs4_ff_layout_ds,
						  id_node));
}

static int ff_layout_encode_ioerr(struct nfs4_flexfile_layout *flo,
				  struct xdr_stream *xdr,
				  const struct nfs4_layoutreturn_args *args)
{
	struct pnfs_layout_hdr *hdr = &flo->generic_hdr;
	__be32 *start;
	int count = 0, ret = 0;

	start = xdr_reserve_space(xdr, 4);
	if (unlikely(!start))
		return -E2BIG;

	/* This assumes we always return _ALL_ layouts */
	spin_lock(&hdr->plh_inode->i_lock);
	ret = ff_layout_encode_ds_ioerr(flo, xdr, &count, &args->range);
	spin_unlock(&hdr->plh_inode->i_lock);

	*start = cpu_to_be32(count);

	return ret;
}

/* report nothing for now */
static void ff_layout_encode_iostats(struct nfs4_flexfile_layout *flo,
				     struct xdr_stream *xdr,
				     const struct nfs4_layoutreturn_args *args)
{
	__be32 *p;

	p = xdr_reserve_space(xdr, 4);
	if (likely(p))
		*p = cpu_to_be32(0);
}

static struct nfs4_deviceid_node *
ff_layout_alloc_deviceid_node(struct nfs_server *server,
			      struct pnfs_device *pdev, gfp_t gfp_flags)
{
	struct nfs4_ff_layout_ds *dsaddr;

	dsaddr = nfs4_ff_alloc_deviceid_node(server, pdev, gfp_flags);
	if (!dsaddr)
		return NULL;
	return &dsaddr->id_node;
}

static void
ff_layout_encode_layoutreturn(struct pnfs_layout_hdr *lo,
			      struct xdr_stream *xdr,
			      const struct nfs4_layoutreturn_args *args)
{
	struct nfs4_flexfile_layout *flo = FF_LAYOUT_FROM_HDR(lo);
	__be32 *start;

	dprintk("%s: Begin\n", __func__);
	start = xdr_reserve_space(xdr, 4);
	BUG_ON(!start);

	if (ff_layout_encode_ioerr(flo, xdr, args))
		goto out;

	ff_layout_encode_iostats(flo, xdr, args);
out:
	*start = cpu_to_be32((xdr->p - start - 1) * 4);
	dprintk("%s: Return\n", __func__);
}

static int
ff_layout_ntop4(const struct sockaddr *sap, char *buf, const size_t buflen)
{
	const struct sockaddr_in *sin = (struct sockaddr_in *)sap;

	return snprintf(buf, buflen, "%pI4", &sin->sin_addr);
}

static size_t
ff_layout_ntop6_noscopeid(const struct sockaddr *sap, char *buf,
			  const int buflen)
{
	const struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)sap;
	const struct in6_addr *addr = &sin6->sin6_addr;

	/*
	 * RFC 4291, Section 2.2.2
	 *
	 * Shorthanded ANY address
	 */
	if (ipv6_addr_any(addr))
		return snprintf(buf, buflen, "::");

	/*
	 * RFC 4291, Section 2.2.2
	 *
	 * Shorthanded loopback address
	 */
	if (ipv6_addr_loopback(addr))
		return snprintf(buf, buflen, "::1");

	/*
	 * RFC 4291, Section 2.2.3
	 *
	 * Special presentation address format for mapped v4
	 * addresses.
	 */
	if (ipv6_addr_v4mapped(addr))
		return snprintf(buf, buflen, "::ffff:%pI4",
				&addr->s6_addr32[3]);

	/*
	 * RFC 4291, Section 2.2.1
	 */
	return snprintf(buf, buflen, "%pI6c", addr);
}

/* Derived from rpc_sockaddr2uaddr */
static void
ff_layout_encode_netaddr(struct xdr_stream *xdr, struct nfs4_pnfs_ds_addr *da)
{
	struct sockaddr *sap = (struct sockaddr *)&da->da_addr;
	char portbuf[RPCBIND_MAXUADDRPLEN];
	char addrbuf[RPCBIND_MAXUADDRLEN];
	char *netid;
	unsigned short port;
	int len, netid_len;
	__be32 *p;

	switch (sap->sa_family) {
	case AF_INET:
		if (ff_layout_ntop4(sap, addrbuf, sizeof(addrbuf)) == 0)
			return;
		port = ntohs(((struct sockaddr_in *)sap)->sin_port);
		netid = "tcp";
		netid_len = 3;
		break;
	case AF_INET6:
		if (ff_layout_ntop6_noscopeid(sap, addrbuf, sizeof(addrbuf)) == 0)
			return;
		port = ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
		netid = "tcp6";
		netid_len = 4;
		break;
	default:
		/* we only support tcp and tcp6 */
		WARN_ON_ONCE(1);
		return;
	}

	snprintf(portbuf, sizeof(portbuf), ".%u.%u", port >> 8, port & 0xff);
	len = strlcat(addrbuf, portbuf, sizeof(addrbuf));

	p = xdr_reserve_space(xdr, 4 + netid_len);
	xdr_encode_opaque(p, netid, netid_len);

	p = xdr_reserve_space(xdr, 4 + len);
	xdr_encode_opaque(p, addrbuf, len);
}

static void
ff_layout_encode_nfstime(struct xdr_stream *xdr,
			 ktime_t t)
{
	struct timespec64 ts;
	__be32 *p;

	p = xdr_reserve_space(xdr, 12);
	ts = ktime_to_timespec64(t);
	p = xdr_encode_hyper(p, ts.tv_sec);
	*p++ = cpu_to_be32(ts.tv_nsec);
}

static void
ff_layout_encode_io_latency(struct xdr_stream *xdr,
			    struct nfs4_ff_io_stat *stat)
{
	__be32 *p;

	p = xdr_reserve_space(xdr, 5 * 8);
	p = xdr_encode_hyper(p, stat->ops_requested);
	p = xdr_encode_hyper(p, stat->bytes_requested);
	p = xdr_encode_hyper(p, stat->ops_completed);
	p = xdr_encode_hyper(p, stat->bytes_completed);
	p = xdr_encode_hyper(p, stat->bytes_not_delivered);
	ff_layout_encode_nfstime(xdr, stat->total_busy_time);
	ff_layout_encode_nfstime(xdr, stat->aggregate_completion_time);
}

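/*
 * ff_layout_encode_layoutstats() below emits the per-mirror layoutstats
 * body: the data server netaddr, its file handle, read and write
 * ff_io_latency4 blocks (taken under the mirror lock), the mirror's
 * lifetime as an nfstime4, and a trailing boolean, all wrapped in a
 * length prefix that is patched in once encoding finishes.
 */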
static void
ff_layout_encode_layoutstats(struct xdr_stream *xdr,
			     struct nfs42_layoutstat_args *args,
			     struct nfs42_layoutstat_devinfo *devinfo)
{
	struct nfs4_ff_layout_mirror *mirror = devinfo->layout_private;
	struct nfs4_pnfs_ds_addr *da;
	struct nfs4_pnfs_ds *ds = mirror->mirror_ds->ds;
	struct nfs_fh *fh = &mirror->fh_versions[0];
	__be32 *p, *start;

	da = list_first_entry(&ds->ds_addrs, struct nfs4_pnfs_ds_addr, da_node);
	dprintk("%s: DS %s: encoding address %s\n",
		__func__, ds->ds_remotestr, da->da_remotestr);
	/* layoutupdate length */
	start = xdr_reserve_space(xdr, 4);
	/* netaddr4 */
	ff_layout_encode_netaddr(xdr, da);
	/* nfs_fh4 */
	p = xdr_reserve_space(xdr, 4 + fh->size);
	xdr_encode_opaque(p, fh->data, fh->size);
	/* ff_io_latency4 read */
	spin_lock(&mirror->lock);
	ff_layout_encode_io_latency(xdr, &mirror->read_stat.io_stat);
	/* ff_io_latency4 write */
	ff_layout_encode_io_latency(xdr, &mirror->write_stat.io_stat);
	spin_unlock(&mirror->lock);
	/* nfstime4 */
	ff_layout_encode_nfstime(xdr, ktime_sub(ktime_get(), mirror->start_time));
	/* bool */
	p = xdr_reserve_space(xdr, 4);
	*p = cpu_to_be32(false);

	*start = cpu_to_be32((xdr->p - start - 1) * 4);
}

static int
ff_layout_mirror_prepare_stats(struct nfs42_layoutstat_args *args,
			       struct pnfs_layout_hdr *lo,
			       int dev_limit)
{
	struct nfs4_flexfile_layout *ff_layout = FF_LAYOUT_FROM_HDR(lo);
	struct nfs4_ff_layout_mirror *mirror;
	struct nfs4_deviceid_node *dev;
	struct nfs42_layoutstat_devinfo *devinfo;
	int i = 0;

	list_for_each_entry(mirror, &ff_layout->mirrors, mirrors) {
		if (i >= dev_limit)
			break;
		if (!mirror->mirror_ds)
			continue;
		/* mirror refcount put in cleanup_layoutstats */
		if (!atomic_inc_not_zero(&mirror->ref))
			continue;
		dev = &mirror->mirror_ds->id_node;
		devinfo = &args->devinfo[i];
		memcpy(&devinfo->dev_id, &dev->deviceid, NFS4_DEVICEID4_SIZE);
		devinfo->offset = 0;
		devinfo->length = NFS4_MAX_UINT64;
		devinfo->read_count = mirror->read_stat.io_stat.ops_completed;
		devinfo->read_bytes = mirror->read_stat.io_stat.bytes_completed;
		devinfo->write_count = mirror->write_stat.io_stat.ops_completed;
		devinfo->write_bytes = mirror->write_stat.io_stat.bytes_completed;
		devinfo->layout_type = LAYOUT_FLEX_FILES;
		devinfo->layoutstats_encode = ff_layout_encode_layoutstats;
		devinfo->layout_private = mirror;

		i++;
	}
	return i;
}

static int
ff_layout_prepare_layoutstats(struct nfs42_layoutstat_args *args)
{
	struct nfs4_flexfile_layout *ff_layout;
	struct nfs4_ff_layout_mirror *mirror;
	int dev_count = 0;

	spin_lock(&args->inode->i_lock);
	ff_layout = FF_LAYOUT_FROM_HDR(NFS_I(args->inode)->layout);
	list_for_each_entry(mirror, &ff_layout->mirrors, mirrors) {
		if (atomic_read(&mirror->ref) != 0)
			dev_count++;
	}
	spin_unlock(&args->inode->i_lock);
	/* For now, send at most PNFS_LAYOUTSTATS_MAXDEV statistics */
	if (dev_count > PNFS_LAYOUTSTATS_MAXDEV) {
		dprintk("%s: truncating devinfo to limit (%d:%d)\n",
			__func__, dev_count, PNFS_LAYOUTSTATS_MAXDEV);
		dev_count = PNFS_LAYOUTSTATS_MAXDEV;
	}
	args->devinfo = kmalloc_array(dev_count, sizeof(*args->devinfo), GFP_NOIO);
	if (!args->devinfo)
		return -ENOMEM;

	spin_lock(&args->inode->i_lock);
	args->num_dev = ff_layout_mirror_prepare_stats(args,
			&ff_layout->generic_hdr, dev_count);
	spin_unlock(&args->inode->i_lock);

	return 0;
}

static void
ff_layout_cleanup_layoutstats(struct nfs42_layoutstat_data *data)
{
	struct nfs4_ff_layout_mirror *mirror;
	int i;

	for (i = 0; i < data->args.num_dev; i++) {
		mirror = data->args.devinfo[i].layout_private;
		data->args.devinfo[i].layout_private = NULL;
		ff_layout_put_mirror(mirror);
	}
}

static struct pnfs_layoutdriver_type flexfilelayout_type = {
	.id			= LAYOUT_FLEX_FILES,
	.name			= "LAYOUT_FLEX_FILES",
	.owner			= THIS_MODULE,
	.alloc_layout_hdr	= ff_layout_alloc_layout_hdr,
	.free_layout_hdr	= ff_layout_free_layout_hdr,
	.alloc_lseg		= ff_layout_alloc_lseg,
	.free_lseg		= ff_layout_free_lseg,
	.pg_read_ops		= &ff_layout_pg_read_ops,
	.pg_write_ops		= &ff_layout_pg_write_ops,
	.get_ds_info		= ff_layout_get_ds_info,
	.free_deviceid_node	= ff_layout_free_deviceid_node,
	.mark_request_commit	= pnfs_layout_mark_request_commit,
	.clear_request_commit	= pnfs_generic_clear_request_commit,
	.scan_commit_lists	= pnfs_generic_scan_commit_lists,
	.recover_commit_reqs	= pnfs_generic_recover_commit_reqs,
	.commit_pagelist	= ff_layout_commit_pagelist,
	.read_pagelist		= ff_layout_read_pagelist,
	.write_pagelist		= ff_layout_write_pagelist,
	.alloc_deviceid_node	= ff_layout_alloc_deviceid_node,
	.encode_layoutreturn	= ff_layout_encode_layoutreturn,
	.sync			= pnfs_nfs_generic_sync,
	.prepare_layoutstats	= ff_layout_prepare_layoutstats,
	.cleanup_layoutstats	= ff_layout_cleanup_layoutstats,
};

static int __init nfs4flexfilelayout_init(void)
{
	printk(KERN_INFO "%s: NFSv4 Flexfile Layout Driver Registering...\n",
	       __func__);
	return pnfs_register_layoutdriver(&flexfilelayout_type);
}

static void __exit nfs4flexfilelayout_exit(void)
{
	printk(KERN_INFO "%s: NFSv4 Flexfile Layout Driver Unregistering...\n",
	       __func__);
	pnfs_unregister_layoutdriver(&flexfilelayout_type);
}

MODULE_ALIAS("nfs-layouttype4-4");

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("The NFSv4 flexfile layout driver");

module_init(nfs4flexfilelayout_init);
module_exit(nfs4flexfilelayout_exit);