/* Copyright (c) 2012 Coraid, Inc.  See COPYING for GPL terms. */
/*
 * aoecmd.c
 * Filesystem request handling methods
 */

#include <linux/ata.h>
#include <linux/slab.h>
#include <linux/hdreg.h>
#include <linux/blkdev.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/genhd.h>
#include <linux/moduleparam.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <net/net_namespace.h>
#include <asm/unaligned.h>
#include <linux/uio.h>
#include "aoe.h"

#define MAXIOC (8192)	/* default meant to avoid most soft lockups */

static void ktcomplete(struct frame *, struct sk_buff *);
static int count_targets(struct aoedev *d, int *untainted);

static struct buf *nextbuf(struct aoedev *);

static int aoe_deadsecs = 60 * 3;
module_param(aoe_deadsecs, int, 0644);
MODULE_PARM_DESC(aoe_deadsecs, "After aoe_deadsecs seconds, give up and fail dev.");

static int aoe_maxout = 64;
module_param(aoe_maxout, int, 0644);
MODULE_PARM_DESC(aoe_maxout,
	"Only aoe_maxout outstanding packets for every MAC on eX.Y.");
static wait_queue_head_t ktiowq;
static struct ktstate kts;

/* io completion queue */
static struct {
	struct list_head head;
	spinlock_t lock;
} iocq;

static struct page *empty_page;

static struct sk_buff *
new_skb(ulong len)
{
	struct sk_buff *skb;

	skb = alloc_skb(len + MAX_HEADER, GFP_ATOMIC);
	if (skb) {
		skb_reserve(skb, MAX_HEADER);
		skb_reset_mac_header(skb);
		skb_reset_network_header(skb);
		skb->protocol = __constant_htons(ETH_P_AOE);
		skb_checksum_none_assert(skb);
	}
	return skb;
}

static struct frame *
getframe_deferred(struct aoedev *d, u32 tag)
{
	struct list_head *head, *pos, *nx;
	struct frame *f;

	head = &d->rexmitq;
	list_for_each_safe(pos, nx, head) {
		f = list_entry(pos, struct frame, head);
		if (f->tag == tag) {
			list_del(pos);
			return f;
		}
	}
	return NULL;
}

static struct frame *
getframe(struct aoedev *d, u32 tag)
{
	struct frame *f;
	struct list_head *head, *pos, *nx;
	ulong n;

	n = tag % NFACTIVE;
	head = &d->factive[n];
	list_for_each_safe(pos, nx, head) {
		f = list_entry(pos, struct frame, head);
		if (f->tag == tag) {
			list_del(pos);
			return f;
		}
	}
	return NULL;
}

/*
 * Leave the top bit clear so we have tagspace for userland.
 * The bottom 16 bits are the xmit tick for rexmit/rttavg processing.
 * This driver reserves tag -1 to mean "unused frame."
 */
static u32
newtag(struct aoedev *d)
{
	register ulong n;

	n = jiffies & 0xffff;
	return n |= (++d->lasttag & 0x7fff) << 16;
}
static u32
aoehdr_atainit(struct aoedev *d, struct aoetgt *t, struct aoe_hdr *h)
{
	u32 host_tag = newtag(d);

	memcpy(h->src, t->ifp->nd->dev_addr, sizeof h->src);
	memcpy(h->dst, t->addr, sizeof h->dst);
	h->type = __constant_cpu_to_be16(ETH_P_AOE);
	/* ... */
	h->major = cpu_to_be16(d->aoemajor);
	h->minor = d->aoeminor;
	/* ... */
	h->tag = cpu_to_be32(host_tag);

	return host_tag;
}

static void
put_lba(struct aoe_atahdr *ah, sector_t lba)
{
	ah->lba0 = lba;
	ah->lba1 = lba >>= 8;
	ah->lba2 = lba >>= 8;
	ah->lba3 = lba >>= 8;
	ah->lba4 = lba >>= 8;
	ah->lba5 = lba >>= 8;
}
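/* Worked example (added for illustration): for a 48-bit LBA of
 * 0x9123456789ab, the assignments above store lba0=0xab, lba1=0x89,
 * lba2=0x67, lba3=0x45, lba4=0x23, lba5=0x91 -- the sector number
 * spread over the six ATA LBA register bytes, least-significant first.
 */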
static struct aoeif *
ifrotate(struct aoetgt *t)
{
	struct aoeif *ifp;

	/* ... */
	if (ifp >= &t->ifs[NAOEIFS] || ifp->nd == NULL)
		ifp = t->ifs;
	/* ... */
}

static void
skb_pool_put(struct aoedev *d, struct sk_buff *skb)
{
	__skb_queue_tail(&d->skbpool, skb);
}

static struct sk_buff *
skb_pool_get(struct aoedev *d)
{
	struct sk_buff *skb = skb_peek(&d->skbpool);

	if (skb && atomic_read(&skb_shinfo(skb)->dataref) == 1) {
		__skb_unlink(skb, &d->skbpool);
		return skb;
	}
	if (skb_queue_len(&d->skbpool) < NSKBPOOLMAX &&
	    (skb = new_skb(ETH_ZLEN)))
		return skb;

	return NULL;
}

void
aoe_freetframe(struct frame *f)
{
	struct aoetgt *t;

	t = f->t;
	/* ... */
	list_add(&f->head, &t->ffree);
}

static struct frame *
newtframe(struct aoedev *d, struct aoetgt *t)
{
	struct frame *f;
	struct sk_buff *skb;
	struct list_head *pos;

	if (list_empty(&t->ffree)) {
		if (t->falloc >= NSKBPOOLMAX*2)
			return NULL;
		f = kcalloc(1, sizeof(*f), GFP_ATOMIC);
		if (f == NULL)
			return NULL;
		/* ... */
	} else {
		pos = t->ffree.next;
		list_del(pos);
		f = list_entry(pos, struct frame, head);
	}

	skb = f->skb;
	if (skb == NULL) {
		f->skb = skb = new_skb(ETH_ZLEN);
		if (!skb) {
bail:			aoe_freetframe(f);
			return NULL;
		}
	}

	if (atomic_read(&skb_shinfo(skb)->dataref) != 1) {
		skb = skb_pool_get(d);
		if (skb == NULL)
			goto bail;
		skb_pool_put(d, f->skb);
		f->skb = skb;
	}

	skb->truesize -= skb->data_len;
	skb_shinfo(skb)->nr_frags = skb->data_len = 0;
	/* ... */
	return f;
}

static struct frame *
newframe(struct aoedev *d)
{
	struct frame *f;
	struct aoetgt *t, **tt;
	int use_tainted;
	int has_untainted;

	if (!d->targets || !d->targets[0]) {
		printk(KERN_ERR "aoe: NULL TARGETS!\n");
		return NULL;
	}
	tt = d->tgt;	/* last used target */
	for (use_tainted = 0, has_untainted = 0;;) {
		tt++;
		if (tt >= &d->targets[d->ntargets] || !*tt)
			tt = d->targets;
		t = *tt;
		if (t->nout < t->maxout
		&& (use_tainted || !t->taint)
		&& t->ifp->nd) {
			f = newtframe(d, t);
			if (f) {
				ifrotate(t);
				d->tgt = tt;
				return f;
			}
		}
		/* ... */
		if (tt == d->tgt) {	/* we've looped and found nada */
			if (!use_tainted && !has_untainted)
				use_tainted = 1;
			else
				break;
		}
	}
	/* ... */
	d->flags |= DEVFL_KICKME;
	/* ... */
	return NULL;
}

static void
skb_fillup(struct sk_buff *skb, struct bio_vec *bv, ulong off, ulong cnt)
{
	int frag = 0;
	ulong fcnt;
loop:
	fcnt = bv->bv_len - (off - bv->bv_offset);
	if (fcnt > cnt)
		fcnt = cnt;
	skb_fill_page_desc(skb, frag++, bv->bv_page, off, fcnt);
	cnt -= fcnt;
	if (cnt <= 0)
		return;
	bv++;
	off = bv->bv_offset;
	goto loop;
}

static void
fhash(struct frame *f)
{
	struct aoedev *d = f->t->d;
	u32 n;

	n = f->tag % NFACTIVE;
	list_add_tail(&f->head, &d->factive[n]);
}

static void
ata_rw_frameinit(struct frame *f)
{
	struct aoetgt *t;
	struct aoe_hdr *h;
	struct aoe_atahdr *ah;
	struct sk_buff *skb;
	char writebit, extbit;

	skb = f->skb;
	h = (struct aoe_hdr *) skb_mac_header(skb);
	ah = (struct aoe_atahdr *) (h + 1);
	skb_put(skb, sizeof(*h) + sizeof(*ah));
	memset(h, 0, skb->len);

	writebit = 0x10;
	extbit = 0x4;

	t = f->t;
	f->tag = aoehdr_atainit(t->d, t, h);
	fhash(f);
	t->nout++;
	f->waited = 0;
	f->waited_total = 0;
	if (f->buf)
		f->lba = f->buf->sector;

	/* set up ata header */
	ah->scnt = f->bcnt >> 9;
	put_lba(ah, f->lba);
	if (t->d->flags & DEVFL_EXT) {
		ah->aflags |= AOEAFL_EXT;
	} else {
		extbit = 0;
		ah->lba3 &= 0x0f;
		ah->lba3 |= 0xe0;	/* LBA bit + obsolete 0xa0 */
	}
	if (f->buf && bio_data_dir(f->buf->bio) == WRITE) {
		skb_fillup(skb, f->bv, f->bv_off, f->bcnt);
		ah->aflags |= AOEAFL_WRITE;
		skb->len += f->bcnt;
		skb->data_len = f->bcnt;
		skb->truesize += f->bcnt;
		t->wpkts++;
	} else {
		t->rpkts++;
		writebit = 0;
	}

	ah->cmdstat = ATA_CMD_PIO_READ | writebit | extbit;
	skb->dev = t->ifp->nd;
}

static int
aoecmd_ata_rw(struct aoedev *d)
{
	struct frame *f;
	struct buf *buf;
	struct sk_buff *skb;
	struct sk_buff_head queue;
	ulong bcnt, fbcnt;

	buf = nextbuf(d);
	if (buf == NULL)
		return 0;
	f = newframe(d);
	if (f == NULL)
		return 0;
	bcnt = d->maxbcnt;
	if (bcnt == 0)
		bcnt = DEFAULTBCNT;
	if (bcnt > buf->resid)
		bcnt = buf->resid;
	fbcnt = bcnt;
	f->bv = buf->bv;
	f->bv_off = f->bv->bv_offset + (f->bv->bv_len - buf->bv_resid);
	do {
		if (fbcnt < buf->bv_resid) {
			buf->bv_resid -= fbcnt;
			buf->resid -= fbcnt;
			break;
		}
		fbcnt -= buf->bv_resid;
		buf->resid -= buf->bv_resid;
		if (buf->resid == 0) {
			d->ip.buf = NULL;
			break;
		}
		buf->bv++;
		buf->bv_resid = buf->bv->bv_len;
		WARN_ON(buf->bv_resid == 0);
	} while (fbcnt);

	/* initialize the headers & frame */
	f->buf = buf;
	f->bcnt = bcnt;
	ata_rw_frameinit(f);

	/* mark all tracking fields and load out */
	buf->nframesout += 1;
	buf->sector += bcnt >> 9;

	skb = skb_clone(f->skb, GFP_ATOMIC);
	if (skb) {
		do_gettimeofday(&f->sent);
		f->sent_jiffs = (u32) jiffies;
		__skb_queue_head_init(&queue);
		__skb_queue_tail(&queue, skb);
		aoenet_xmit(&queue);
	}
	return 1;
}

/* some callers cannot sleep, and they can call this function,
 * transmitting the packets later, when interrupts are on
 */
static void
aoecmd_cfg_pkts(ushort aoemajor, unsigned char aoeminor, struct sk_buff_head *queue)
{
	struct aoe_hdr *h;
	struct aoe_cfghdr *ch;
	struct sk_buff *skb;
	struct net_device *ifp;

	rcu_read_lock();
	for_each_netdev_rcu(&init_net, ifp) {
		dev_hold(ifp);
		if (!is_aoe_netif(ifp))
			goto cont;

		skb = new_skb(sizeof *h + sizeof *ch);
		if (skb == NULL) {
			printk(KERN_INFO "aoe: skb alloc failure\n");
			goto cont;
		}
		skb_put(skb, sizeof *h + sizeof *ch);
		skb->dev = ifp;
		__skb_queue_tail(queue, skb);
		h = (struct aoe_hdr *) skb_mac_header(skb);
		memset(h, 0, sizeof *h + sizeof *ch);

		memset(h->dst, 0xff, sizeof h->dst);
		memcpy(h->src, ifp->dev_addr, sizeof h->src);
		h->type = __constant_cpu_to_be16(ETH_P_AOE);
		/* ... */
		h->major = cpu_to_be16(aoemajor);
		/* ... */
cont:
		dev_put(ifp);
	}
	rcu_read_unlock();
}
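/* Typical caller pattern (a sketch; cf. aoecmd_cfg below, which does
 * exactly this): build the packets with interrupts off, then transmit
 * once it is safe to do so:
 *
 *	struct sk_buff_head queue;
 *
 *	__skb_queue_head_init(&queue);
 *	aoecmd_cfg_pkts(aoemajor, aoeminor, &queue);
 *	aoenet_xmit(&queue);
 */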
static void
resend(struct aoedev *d, struct frame *f)
{
	struct sk_buff *skb;
	struct sk_buff_head queue;
	struct aoe_hdr *h;
	struct aoe_atahdr *ah;
	struct aoetgt *t;
	char buf[128];
	u32 n;

	t = f->t;
	n = newtag(d);
	skb = f->skb;
	if (ifrotate(t) == NULL) {
		/* probably can't happen, but set it up to fail anyway */
		pr_info("aoe: resend: no interfaces to rotate to.\n");
		ktcomplete(f, NULL);
		return;
	}
	h = (struct aoe_hdr *) skb_mac_header(skb);
	ah = (struct aoe_atahdr *) (h+1);

	if (!(f->flags & FFL_PROBE)) {
		snprintf(buf, sizeof(buf),
			"%15s e%ld.%d oldtag=%08x@%08lx newtag=%08x s=%pm d=%pm nout=%d\n",
			"retransmit", d->aoemajor, d->aoeminor,
			f->tag, jiffies, n,
			h->src, h->dst, t->nout);
		aoechr_error(buf);
	}

	f->tag = n;
	fhash(f);
	h->tag = cpu_to_be32(n);
	memcpy(h->dst, t->addr, sizeof h->dst);
	memcpy(h->src, t->ifp->nd->dev_addr, sizeof h->src);

	skb->dev = t->ifp->nd;
	skb = skb_clone(skb, GFP_ATOMIC);
	if (skb == NULL)
		return;
	do_gettimeofday(&f->sent);
	f->sent_jiffs = (u32) jiffies;
	__skb_queue_head_init(&queue);
	__skb_queue_tail(&queue, skb);
	aoenet_xmit(&queue);
}

static int
tsince_hr(struct frame *f)
{
	struct timeval now;
	int n;

	do_gettimeofday(&now);
	n = now.tv_usec - f->sent.tv_usec;
	n += (now.tv_sec - f->sent.tv_sec) * USEC_PER_SEC;

	if (n < 0)
		n = -n;

	/* For relatively long periods, use jiffies to avoid
	 * discrepancies caused by updates to the system time.
	 *
	 * On system with HZ of 1000, 32-bits is over 49 days
	 * worth of jiffies, or over 71 minutes worth of usecs.
	 *
	 * Jiffies overflow is handled by subtraction of unsigned ints:
	 * (gdb) print (unsigned) 2 - (unsigned) 0xfffffffe
	 * $3 = 4
	 */
	if (n > USEC_PER_SEC / 4) {
		n = ((u32) jiffies) - f->sent_jiffs;
		n *= USEC_PER_SEC / HZ;
	}

	return n;
}

static int
tsince(u32 tag)
{
	int n;

	n = jiffies & 0xffff;
	/* ... */
	return jiffies_to_usecs(n + 1);
}

static struct aoeif *
getif(struct aoetgt *t, struct net_device *nd)
{
	/* ... */
}

static void
ejectif(struct aoetgt *t, struct aoeif *ifp)
{
	struct aoeif *e;
	struct net_device *nd;
	ulong n;

	nd = ifp->nd;
	e = t->ifs + NAOEIFS - 1;
	n = (e - ifp) * sizeof *ifp;
	memmove(ifp, ifp+1, n);
	e->nd = NULL;
	dev_put(nd);
}

static struct frame *
reassign_frame(struct frame *f)
{
	struct frame *nf;

	nf = newframe(f->t->d);
	if (!nf)
		return NULL;
	/* ... (other frame state is copied from f to nf here) ... */
	nf->bv_off = f->bv_off;
	nf->waited_total = f->waited_total;
	nf->sent_jiffs = f->sent_jiffs;
	/* ... */
	return nf;
}

static void
probe(struct aoetgt *t)
{
	struct aoedev *d;
	struct frame *f;
	struct sk_buff *skb;
	struct sk_buff_head queue;
	size_t n, m;
	int frag;

	d = t->d;
	f = newtframe(d, t);
	if (!f) {
		pr_err("%s %pm for e%ld.%d: %s\n",
			"aoe: cannot probe remote address",
			t->addr,
			(long) d->aoemajor, d->aoeminor,
			"no frame available");
		return;
	}
	f->flags |= FFL_PROBE;
	ifrotate(t);
	f->bcnt = t->d->maxbcnt ? t->d->maxbcnt : DEFAULTBCNT;
	ata_rw_frameinit(f);
	skb = f->skb;
	for (frag = 0, n = f->bcnt; n > 0; ++frag, n -= m) {
		if (n < PAGE_SIZE)
			m = n;
		else
			m = PAGE_SIZE;
		skb_fill_page_desc(skb, frag, empty_page, 0, m);
	}
	skb->len += f->bcnt;
	skb->data_len = f->bcnt;
	skb->truesize += f->bcnt;

	skb = skb_clone(f->skb, GFP_ATOMIC);
	if (skb) {
		do_gettimeofday(&f->sent);
		f->sent_jiffs = (u32) jiffies;
		__skb_queue_head_init(&queue);
		__skb_queue_tail(&queue, skb);
		aoenet_xmit(&queue);
	}
}

static long
rto(struct aoedev *d)
{
	long t;

	t = 2 * d->rttavg >> RTTSCALE;
	t += 8 * d->rttdev >> RTTDSCALE;
	if (t == 0)
		t = 1;

	return t;
}
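/* Added commentary (not in the original): this is a Jacobson/Karels
 * style retransmit timeout -- roughly twice the smoothed RTT plus
 * eight times its smoothed mean deviation -- computed from the
 * fixed-point sums d->rttavg and d->rttdev maintained by calc_rttavg()
 * below (see the citation there).
 */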
static void
rexmit_deferred(struct aoedev *d)
{
	struct aoetgt *t;
	struct frame *f;
	struct frame *nf;
	struct list_head *pos, *nx, *head;
	int since;
	int untainted;

	count_targets(d, &untainted);

	head = &d->rexmitq;
	list_for_each_safe(pos, nx, head) {
		f = list_entry(pos, struct frame, head);
		t = f->t;
		if (t->taint) {
			if (!(f->flags & FFL_PROBE)) {
				nf = reassign_frame(f);
				if (nf) {
					if (t->nout_probes == 0
					&& untainted > 0) {
						probe(t);
						t->nout_probes++;
					}
					list_replace(&f->head, &nf->head);
					pos = &nf->head;
					aoe_freetframe(f);
					f = nf;
					t = f->t;
				}
			} else if (untainted < 1) {
				/* don't probe w/o other untainted aoetgts */
				goto stop_probe;
			} else if (tsince_hr(f) < t->taint * rto(d)) {
				/* reprobe slowly when taint is high */
				continue;
			}
		} else if (f->flags & FFL_PROBE) {
stop_probe:		/* don't probe untainted aoetgts */
			list_del(pos);
			aoe_freetframe(f);
			/* leaving d->kicked, because this is routine */
			f->t->d->flags |= DEVFL_KICKME;
			continue;
		}
		if (t->nout >= t->maxout)
			continue;
		list_del(pos);
		t->nout++;
		if (f->flags & FFL_PROBE)
			t->nout_probes++;
		since = tsince_hr(f);
		f->waited += since;
		f->waited_total += since;
		resend(d, f);
	}
}

/* An aoetgt accumulates demerits quickly, and successful
 * probing redeems the aoetgt slowly.
 */
static void
scorn(struct aoetgt *t)
{
	/* ... */
	t->taint += t->taint * 2;
	/* ... */
	if (t->taint > MAX_TAINT)
		t->taint = MAX_TAINT;
}
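/* Added commentary (not in the original): as written above, a nonzero
 * taint triples on every scorn() -- e.g. 1, 3, 9, 27, ... -- so a
 * misbehaving target accumulates demerits geometrically until the
 * value saturates at MAX_TAINT.
 */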
static int
count_targets(struct aoedev *d, int *untainted)
{
	int i, good;

	for (i = good = 0; i < d->ntargets && d->targets[i]; ++i)
		if (d->targets[i]->taint == 0)
			good++;

	if (untainted)
		*untainted = good;
	return i;
}

static void
rexmit_timer(ulong vp)
{
	struct aoedev *d;
	struct aoetgt *t;
	struct aoeif *ifp;
	struct frame *f;
	struct list_head *head, *pos, *nx;
	LIST_HEAD(flist);
	register long timeout;
	ulong flags, n;
	int i;
	int utgts;	/* number of aoetgt descriptors (not slots) */
	int since;

	d = (struct aoedev *) vp;

	spin_lock_irqsave(&d->lock, flags);

	/* timeout based on observed timings and variations */
	timeout = rto(d);

	utgts = count_targets(d, NULL);

	if (d->flags & DEVFL_TKILL) {
		spin_unlock_irqrestore(&d->lock, flags);
		return;
	}

	/* collect all frames to rexmit into flist */
	for (i = 0; i < NFACTIVE; i++) {
		head = &d->factive[i];
		list_for_each_safe(pos, nx, head) {
			f = list_entry(pos, struct frame, head);
			if (tsince_hr(f) < timeout)
				break;	/* end of expired frames */
			/* move to flist for later processing */
			list_move_tail(pos, &flist);
		}
	}

	/* process expired frames */
	while (!list_empty(&flist)) {
		pos = flist.next;
		f = list_entry(pos, struct frame, head);
		since = tsince_hr(f);
		n = f->waited_total + since;
		n /= USEC_PER_SEC;
		if (aoe_deadsecs
		&& n > aoe_deadsecs
		&& !(f->flags & FFL_PROBE)) {
			/* Waited too long.  Device failure.
			 * Hang all frames on first hash bucket for downdev
			 * to clean up.
			 */
			list_splice(&flist, &d->factive[0]);
			aoedev_downdev(d);
			goto out;
		}

		t = f->t;
		n = f->waited + since;
		n /= USEC_PER_SEC;
		if (aoe_deadsecs && utgts > 0
		&& (n > aoe_deadsecs / utgts || n > HARD_SCORN_SECS))
			scorn(t);	/* avoid this target */

		if (t->maxout != 1) {
			t->ssthresh = t->maxout / 2;
			t->maxout = 1;
		}

		if (f->flags & FFL_PROBE) {
			t->nout_probes--;
		} else {
			ifp = getif(t, f->skb->dev);
			if (ifp && ++ifp->lost > (t->nframes << 1)
			&& (ifp != t->ifs || t->ifs[1].nd)) {
				ejectif(t, ifp);
				ifp = NULL;
			}
		}
		list_move_tail(pos, &d->rexmitq);
		t->nout--;
	}
	rexmit_deferred(d);

out:
	if ((d->flags & DEVFL_KICKME) && d->blkq) {
		d->flags &= ~DEVFL_KICKME;
		d->blkq->request_fn(d->blkq);
	}

	d->timer.expires = jiffies + TIMERTICK;
	add_timer(&d->timer);

	spin_unlock_irqrestore(&d->lock, flags);
}

static unsigned long
rqbiocnt(struct request *r)
{
	struct bio *bio;
	unsigned long n = 0;

	__rq_for_each_bio(bio, r)
		n++;
	return n;
}

/* This can be removed if we are certain that no users of the block
 * layer will ever use zero-count pages in bios.  Otherwise we have to
 * protect against the put_page sometimes done by the network layer.
 *
 * See http://oss.sgi.com/archives/xfs/2007-01/msg00594.html for
 * discussion.
 *
 * We cannot use get_page in the workaround, because it insists on a
 * positive page count as a precondition.  So we use _count directly.
 */
static void
bio_pageinc(struct bio *bio)
{
	struct bio_vec *bv;
	struct page *page;
	int i;

	bio_for_each_segment(bv, bio, i) {
		page = bv->bv_page;
		/* Non-zero page count for non-head members of
		 * compound pages is no longer allowed by the kernel,
		 * but this has never been seen here.
		 */
		if (unlikely(PageCompound(page)))
			if (compound_trans_head(page) != page) {
				pr_crit("page tail used for block I/O\n");
				BUG();
			}
		atomic_inc(&page->_count);
	}
}

static void
bio_pagedec(struct bio *bio)
{
	struct bio_vec *bv;
	int i;

	bio_for_each_segment(bv, bio, i)
		atomic_dec(&bv->bv_page->_count);
}

static void
bufinit(struct buf *buf, struct request *rq, struct bio *bio)
{
	memset(buf, 0, sizeof(*buf));
	buf->rq = rq;
	buf->bio = bio;
	buf->resid = bio->bi_size;
	buf->sector = bio->bi_sector;
	bio_pageinc(bio);
	buf->bv = bio_iovec(bio);
	buf->bv_resid = buf->bv->bv_len;
	WARN_ON(buf->bv_resid == 0);
}

static struct buf *
nextbuf(struct aoedev *d)
{
	struct request *rq;
	struct request_queue *q;
	struct buf *buf;
	struct bio *bio;

	q = d->blkq;
	if (q == NULL)
		return NULL;	/* initializing */
	if (d->ip.buf)
		return d->ip.buf;
	rq = d->ip.rq;
	if (rq == NULL) {
		rq = blk_peek_request(q);
		if (rq == NULL)
			return NULL;
		blk_start_request(rq);
		d->ip.rq = rq;
		d->ip.nxbio = rq->bio;
		rq->special = (void *) rqbiocnt(rq);
	}
	buf = mempool_alloc(d->bufpool, GFP_ATOMIC);
	if (buf == NULL) {
		pr_err("aoe: nextbuf: unable to mempool_alloc!\n");
		return NULL;
	}
	bio = d->ip.nxbio;
	bufinit(buf, rq, bio);
	bio = bio->bi_next;
	d->ip.nxbio = bio;
	if (bio == NULL)
		d->ip.rq = NULL;
	return d->ip.buf = buf;
}

/* enters with d->lock held */
void
aoecmd_work(struct aoedev *d)
{
	rexmit_deferred(d);
	while (aoecmd_ata_rw(d))
		;
}

/* this function performs work that has been deferred until sleeping is OK
 */
static void
aoecmd_sleepwork(struct work_struct *work)
{
	struct aoedev *d = container_of(work, struct aoedev, work);
	struct block_device *bd;
	u64 ssize;

	if (d->flags & DEVFL_GDALLOC)
		aoeblk_gdalloc(d);

	if (d->flags & DEVFL_NEWSIZE) {
		ssize = get_capacity(d->gd);
		bd = bdget_disk(d->gd, 0);
		if (bd) {
			mutex_lock(&bd->bd_inode->i_mutex);
			i_size_write(bd->bd_inode, (loff_t)ssize<<9);
			mutex_unlock(&bd->bd_inode->i_mutex);
			bdput(bd);
		}
		spin_lock_irq(&d->lock);
		d->flags |= DEVFL_UP;
		d->flags &= ~DEVFL_NEWSIZE;
		spin_unlock_irq(&d->lock);
	}
}

static void
ata_ident_fixstring(u16 *id, int ns)
{
	u16 s;

	while (ns-- > 0) {
		s = *id;
		*id++ = s >> 8 | s << 8;
	}
}

static void
ataid_complete(struct aoedev *d, struct aoetgt *t, unsigned char *id)
{
	u64 ssize;
	u16 n;

	/* word 83: command set supported */
	n = get_unaligned_le16(&id[83 << 1]);

	/* word 86: command set/feature enabled */
	n |= get_unaligned_le16(&id[86 << 1]);

	if (n & (1<<10)) {	/* bit 10: LBA 48 */
		d->flags |= DEVFL_EXT;

		/* word 100: number lba48 sectors */
		ssize = get_unaligned_le64(&id[100 << 1]);

		/* set as in ide-disk.c:init_idedisk_capacity */
		d->geo.cylinders = ssize;
		d->geo.cylinders /= (255 * 63);
		d->geo.heads = 255;
		d->geo.sectors = 63;
	} else {
		d->flags &= ~DEVFL_EXT;

		/* number lba28 sectors */
		ssize = get_unaligned_le32(&id[60 << 1]);

		/* NOTE: obsolete in ATA 6 */
		d->geo.cylinders = get_unaligned_le16(&id[54 << 1]);
		d->geo.heads = get_unaligned_le16(&id[55 << 1]);
		d->geo.sectors = get_unaligned_le16(&id[56 << 1]);
	}

	ata_ident_fixstring((u16 *) &id[10<<1], 10);	/* serial */
	ata_ident_fixstring((u16 *) &id[23<<1], 4);	/* firmware */
	ata_ident_fixstring((u16 *) &id[27<<1], 20);	/* model */
	memcpy(d->ident, id, sizeof(d->ident));

	if (d->ssize != ssize)
		printk(KERN_INFO
			"aoe: %pm e%ld.%d v%04x has %llu sectors\n",
			t->addr,
			d->aoemajor, d->aoeminor,
			d->fw_ver, (long long)ssize);
	d->ssize = ssize;
	d->geo.start = 0;
	if (d->flags & (DEVFL_GDALLOC|DEVFL_NEWSIZE))
		return;
	if (d->gd != NULL) {
		set_capacity(d->gd, ssize);
		d->flags |= DEVFL_NEWSIZE;
	} else
		d->flags |= DEVFL_GDALLOC;
	schedule_work(&d->work);
}

static void
calc_rttavg(struct aoedev *d, struct aoetgt *t, int rtt)
{
	register long n;

	n = rtt;

	/* cf. Congestion Avoidance and Control, Jacobson & Karels, 1988 */
	n -= d->rttavg >> RTTSCALE;
	d->rttavg += n;
	if (n < 0)
		n = -n;
	n -= d->rttdev >> RTTDSCALE;
	d->rttdev += n;

	if (!t || t->maxout >= t->nframes)
		return;
	if (t->maxout < t->ssthresh)
		t->maxout += 1;
	else if (t->nout == t->maxout && t->next_cwnd-- == 0) {
		t->maxout += 1;
		t->next_cwnd = t->maxout;
	}
}
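/* Added commentary (not in the original): rttavg and rttdev are kept
 * pre-scaled, so the updates above implement the paper's exponentially
 * weighted moving averages,
 *
 *	srtt += (rtt - srtt) / 2^RTTSCALE
 *	dev  += (|rtt - srtt| - dev) / 2^RTTDSCALE
 *
 * where srtt = rttavg >> RTTSCALE and dev = rttdev >> RTTDSCALE; the
 * fixed-point form needs only integer adds and shifts.
 */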
static struct aoetgt *
gettgt(struct aoedev *d, char *addr)
{
	struct aoetgt **t, **e;

	t = d->targets;
	e = t + d->ntargets;
	for (; t < e && *t; t++)
		if (memcmp((*t)->addr, addr, sizeof((*t)->addr)) == 0)
			return *t;
	return NULL;
}

static void
bvcpy(struct bio_vec *bv, ulong off, struct sk_buff *skb, long cnt)
{
	ulong fcnt;
	char *p;
	int soff = 0;
loop:
	fcnt = bv->bv_len - (off - bv->bv_offset);
	if (fcnt > cnt)
		fcnt = cnt;
	p = page_address(bv->bv_page) + off;
	skb_copy_bits(skb, soff, p, fcnt);
	soff += fcnt;
	cnt -= fcnt;
	if (cnt <= 0)
		return;
	bv++;
	off = bv->bv_offset;
	goto loop;
}

static void
aoe_end_request(struct aoedev *d, struct request *rq, int fastfail)
{
	int bok;
	struct request_queue *q;
	struct bio *bio;

	q = d->blkq;
	if (rq == d->ip.rq)
		d->ip.rq = NULL;
	do {
		bio = rq->bio;
		bok = !fastfail && test_bit(BIO_UPTODATE, &bio->bi_flags);
	} while (__blk_end_request(rq, bok ? 0 : -EIO, bio->bi_size));

	/* cf. http://lkml.org/lkml/2006/10/31/28 */
	if (!fastfail)
		__blk_run_queue(q);
}

static void
aoe_end_buf(struct aoedev *d, struct buf *buf)
{
	struct request *rq;
	unsigned long n;

	if (buf == d->ip.buf)
		d->ip.buf = NULL;
	rq = buf->rq;
	bio_pagedec(buf->bio);
	mempool_free(buf, d->bufpool);
	n = (unsigned long) rq->special;
	rq->special = (void *) --n;
	if (n == 0)
		aoe_end_request(d, rq, 0);
}

static void
ktiocomplete(struct frame *f)
{
	struct aoe_hdr *hin, *hout;
	struct aoe_atahdr *ahin, *ahout;
	struct buf *buf;
	struct sk_buff *skb;
	struct aoetgt *t;
	struct aoeif *ifp;
	struct aoedev *d;
	long n;
	int untainted;

	if (f == NULL)
		return;

	t = f->t;
	d = t->d;
	skb = f->r_skb;
	buf = f->buf;
	if (f->flags & FFL_PROBE)
		goto out;
	if (!skb)		/* just fail the buf. */
		goto noskb;

	hout = (struct aoe_hdr *) skb_mac_header(f->skb);
	ahout = (struct aoe_atahdr *) (hout+1);

	hin = (struct aoe_hdr *) skb->data;
	skb_pull(skb, sizeof(*hin));
	ahin = (struct aoe_atahdr *) skb->data;
	skb_pull(skb, sizeof(*ahin));
	if (ahin->cmdstat & 0xa9) {	/* these bits cleared on success */
		pr_err("aoe: ata error cmd=%2.2Xh stat=%2.2Xh from e%ld.%d\n",
			ahout->cmdstat, ahin->cmdstat,
			d->aoemajor, d->aoeminor);
noskb:		if (buf)
			clear_bit(BIO_UPTODATE, &buf->bio->bi_flags);
		goto out;
	}

	n = ahout->scnt << 9;
	switch (ahout->cmdstat) {
	case ATA_CMD_PIO_READ:
	case ATA_CMD_PIO_READ_EXT:
		if (skb->len < n) {
			pr_err("%s e%ld.%d.  skb->len=%d need=%ld\n",
				"aoe: runt data size in read from",
				(long) d->aoemajor, d->aoeminor,
				skb->len, n);
			clear_bit(BIO_UPTODATE, &buf->bio->bi_flags);
			break;
		}
		bvcpy(f->bv, f->bv_off, skb, n);
	case ATA_CMD_PIO_WRITE:
	case ATA_CMD_PIO_WRITE_EXT:
		spin_lock_irq(&d->lock);
		ifp = getif(t, skb->dev);
		if (ifp)
			ifp->lost = 0;
		spin_unlock_irq(&d->lock);
		break;
	case ATA_CMD_ID_ATA:
		if (skb->len < 512) {
			pr_info("%s e%ld.%d.  skb->len=%d need=512\n",
				"aoe: runt data size in ataid from",
				(long) d->aoemajor, d->aoeminor,
				skb->len);
			break;
		}
		if (skb_linearize(skb))
			break;
		spin_lock_irq(&d->lock);
		ataid_complete(d, t, skb->data);
		spin_unlock_irq(&d->lock);
		break;
	default:
		pr_info("aoe: unrecognized ata command %2.2Xh for %d.%d\n",
			ahout->cmdstat,
			be16_to_cpu(get_unaligned(&hin->major)),
			hin->minor);
	}
out:
	spin_lock_irq(&d->lock);
	if (t->taint > 0
	&& --t->taint > 0
	&& t->nout_probes == 0) {
		count_targets(d, &untainted);
		if (untainted > 0) {
			probe(t);
			t->nout_probes++;
		}
	}

	aoe_freetframe(f);

	if (buf && --buf->nframesout == 0 && buf->resid == 0)
		aoe_end_buf(d, buf);

	spin_unlock_irq(&d->lock);
	aoedev_put(d);
	dev_kfree_skb(skb);
}

/* Enters with iocq.lock held.
 * Returns true iff responses needing processing remain.
 */
static int
ktio(void)
{
	struct frame *f;
	struct list_head *pos;
	int i;

	for (i = 0; ; ++i) {
		if (i == MAXIOC)
			return 1;
		if (list_empty(&iocq.head))
			return 0;
		pos = iocq.head.next;
		list_del(pos);
		spin_unlock_irq(&iocq.lock);
		f = list_entry(pos, struct frame, head);
		ktiocomplete(f);
		spin_lock_irq(&iocq.lock);
	}
}

static int
kthread(void *vp)
{
	struct ktstate *k;
	DECLARE_WAITQUEUE(wait, current);
	int more;

	k = vp;
	current->flags |= PF_NOFREEZE;
	set_user_nice(current, -10);
	complete(&k->rendez);	/* tell spawner we're running */
	do {
		spin_lock_irq(k->lock);
		more = k->fn();
		if (!more) {
			add_wait_queue(k->waitq, &wait);
			__set_current_state(TASK_INTERRUPTIBLE);
		}
		spin_unlock_irq(k->lock);
		if (!more) {
			schedule();
			remove_wait_queue(k->waitq, &wait);
		} else
			cond_resched();
	} while (!kthread_should_stop());
	complete(&k->rendez);	/* tell spawner we're stopping */
	return 0;
}

void
aoe_ktstop(struct ktstate *k)
{
	kthread_stop(k->task);
	wait_for_completion(&k->rendez);
}

int
aoe_ktstart(struct ktstate *k)
{
	struct task_struct *task;

	init_completion(&k->rendez);
	task = kthread_run(kthread, k, k->name);
	if (task == NULL || IS_ERR(task))
		return -ENOMEM;
	k->task = task;
	wait_for_completion(&k->rendez);	/* allow kthread to start */
	init_completion(&k->rendez);	/* for waiting for exit later */
	return 0;
}

/* pass it off to kthreads for processing */
static void
ktcomplete(struct frame *f, struct sk_buff *skb)
{
	ulong flags;

	f->r_skb = skb;
	spin_lock_irqsave(&iocq.lock, flags);
	list_add_tail(&f->head, &iocq.head);
	spin_unlock_irqrestore(&iocq.lock, flags);
	wake_up(&ktiowq);
}

struct sk_buff *
aoecmd_ata_rsp(struct sk_buff *skb)
{
	struct aoedev *d;
	struct aoe_hdr *h;
	struct frame *f;
	u32 n;
	ulong flags;
	char ebuf[128];
	u16 aoemajor;

	h = (struct aoe_hdr *) skb->data;
	aoemajor = be16_to_cpu(get_unaligned(&h->major));
	d = aoedev_by_aoeaddr(aoemajor, h->minor, 0);
	if (d == NULL) {
		snprintf(ebuf, sizeof ebuf, "aoecmd_ata_rsp: ata response "
			"for unknown device %d.%d\n",
			aoemajor, h->minor);
		aoechr_error(ebuf);
		return skb;
	}

	spin_lock_irqsave(&d->lock, flags);

	n = be32_to_cpu(get_unaligned(&h->tag));
	f = getframe(d, n);
	if (f) {
		calc_rttavg(d, f->t, tsince_hr(f));
		f->t->nout--;
		if (f->flags & FFL_PROBE)
			f->t->nout_probes--;
	} else {
		f = getframe_deferred(d, n);
		if (f) {
			calc_rttavg(d, NULL, tsince_hr(f));
		} else {
			calc_rttavg(d, NULL, tsince(n));
			spin_unlock_irqrestore(&d->lock, flags);
			aoedev_put(d);
			snprintf(ebuf, sizeof(ebuf),
				"%15s e%d.%d tag=%08x@%08lx s=%pm d=%pm\n",
				"unexpected rsp",
				get_unaligned_be16(&h->major),
				h->minor,
				get_unaligned_be32(&h->tag),
				jiffies,
				h->src,
				h->dst);
			aoechr_error(ebuf);
			return skb;
		}
	}
	aoecmd_work(d);

	spin_unlock_irqrestore(&d->lock, flags);

	ktcomplete(f, skb);

	/*
	 * Note here that we do not perform an aoedev_put, as we are
	 * leaving this reference for the ktio to release.
	 */
	return NULL;
}

void
aoecmd_cfg(ushort aoemajor, unsigned char aoeminor)
{
	struct sk_buff_head queue;

	__skb_queue_head_init(&queue);
	aoecmd_cfg_pkts(aoemajor, aoeminor, &queue);
	aoenet_xmit(&queue);
}

struct sk_buff *
aoecmd_ata_id(struct aoedev *d)
{
	struct aoe_hdr *h;
	struct aoe_atahdr *ah;
	struct frame *f;
	struct sk_buff *skb;
	struct aoetgt *t;

	f = newframe(d);
	if (f == NULL)
		return NULL;

	t = *d->tgt;

	/* initialize the headers & frame */
	skb = f->skb;
	h = (struct aoe_hdr *) skb_mac_header(skb);
	ah = (struct aoe_atahdr *) (h+1);
	skb_put(skb, sizeof *h + sizeof *ah);
	memset(h, 0, skb->len);
	f->tag = aoehdr_atainit(d, t, h);
	fhash(f);
	t->nout++;
	f->waited = 0;
	f->waited_total = 0;

	/* set up ata header */
	ah->scnt = 1;
	ah->cmdstat = ATA_CMD_ID_ATA;
	ah->lba3 = 0xa0;

	skb->dev = t->ifp->nd;

	d->rttavg = RTTAVG_INIT;
	d->rttdev = RTTDEV_INIT;
	d->timer.function = rexmit_timer;

	skb = skb_clone(skb, GFP_ATOMIC);
	if (skb) {
		do_gettimeofday(&f->sent);
		f->sent_jiffs = (u32) jiffies;
	}

	return skb;
}

static struct aoetgt **
grow_targets(struct aoedev *d)
{
	ulong oldn, newn;
	struct aoetgt **tt;

	oldn = d->ntargets;
	newn = oldn * 2;
	tt = kcalloc(newn, sizeof(*d->targets), GFP_ATOMIC);
	if (!tt)
		return NULL;
	memmove(tt, d->targets, sizeof(*d->targets) * oldn);
	d->tgt = tt + (d->tgt - d->targets);
	kfree(d->targets);
	d->targets = tt;
	d->ntargets = newn;

	return &d->targets[oldn];
}

static struct aoetgt *
addtgt(struct aoedev *d, char *addr, ulong nframes)
{
	struct aoetgt *t, **tt, **te;

	tt = d->targets;
	te = tt + d->ntargets;
	for (; tt < te && *tt; tt++)
		;

	if (tt == te) {
		tt = grow_targets(d);
		if (!tt)
			goto nomem;
	}
	t = kzalloc(sizeof(*t), GFP_ATOMIC);
	if (!t)
		goto nomem;
	t->nframes = nframes;
	t->d = d;
	memcpy(t->addr, addr, sizeof t->addr);
	t->ifp = t->ifs;
	aoecmd_wreset(t);
	t->maxout = t->nframes / 2;
	INIT_LIST_HEAD(&t->ffree);
	return *tt = t;

 nomem:
	pr_info("aoe: cannot allocate memory to add target\n");
	return NULL;
}

static void
setdbcnt(struct aoedev *d)
{
	struct aoetgt **t, **e;
	int bcnt = 0;

	t = d->targets;
	e = t + d->ntargets;
	for (; t < e && *t; t++)
		if (bcnt == 0 || bcnt > (*t)->minbcnt)
			bcnt = (*t)->minbcnt;
	if (bcnt != d->maxbcnt) {
		d->maxbcnt = bcnt;
		pr_info("aoe: e%ld.%d: setting %d byte data frames\n",
			d->aoemajor, d->aoeminor, bcnt);
	}
}

static void
setifbcnt(struct aoetgt *t, struct net_device *nd, int bcnt)
{
	struct aoedev *d;
	struct aoeif *p, *e;
	int minbcnt;

	d = t->d;
	minbcnt = bcnt;
	p = t->ifs;
	e = p + NAOEIFS;
	for (; p < e; p++) {
		if (p->nd == NULL)
			break;		/* end of the valid interfaces */
		if (p->nd == nd) {
			p->bcnt = bcnt;	/* we're updating */
			nd = NULL;
		} else if (minbcnt > p->bcnt)
			minbcnt = p->bcnt; /* find the min interface */
	}

	if (nd) {
		if (p == e) {
			pr_err("aoe: device setifbcnt failure; too many interfaces.\n");
			return;
		}
		dev_hold(nd);
		p->nd = nd;
		p->bcnt = bcnt;
	}

	t->minbcnt = minbcnt;
	setdbcnt(d);
}

void
aoecmd_cfg_rsp(struct sk_buff *skb)
{
	struct aoedev *d;
	struct aoe_hdr *h;
	struct aoe_cfghdr *ch;
	struct aoetgt *t;
	ulong flags, aoemajor;
	struct sk_buff *sl;
	struct sk_buff_head queue;
	u16 n;

	sl = NULL;
	h = (struct aoe_hdr *) skb_mac_header(skb);
	ch = (struct aoe_cfghdr *) (h+1);

	/*
	 * Enough people have their dip switches set backwards to
	 * warrant a loud message for this special case.
	 */
	aoemajor = get_unaligned_be16(&h->major);
	if (aoemajor == 0xfff) {
		printk(KERN_ERR "aoe: Warning: shelf address is all ones.  "
			"Check shelf dip switches.\n");
		return;
	}
	if (aoemajor == 0xffff) {
		pr_info("aoe: e%ld.%d: broadcast shelf number invalid\n",
			aoemajor, (int) h->minor);
		return;
	}
	if (h->minor == 0xff) {
		pr_info("aoe: e%ld.%d: broadcast slot number invalid\n",
			aoemajor, (int) h->minor);
		return;
	}

	n = be16_to_cpu(ch->bufcnt);
	if (n > aoe_maxout)	/* keep it reasonable */
		n = aoe_maxout;

	d = aoedev_by_aoeaddr(aoemajor, h->minor, 1);
	if (d == NULL) {
		pr_info("aoe: device allocation failure\n");
		return;
	}

	spin_lock_irqsave(&d->lock, flags);

	t = gettgt(d, h->src);
	if (t) {
		t->nframes = n;
		if (n < t->maxout)
			aoecmd_wreset(t);
	} else {
		t = addtgt(d, h->src, n);
		if (!t)
			goto bail;
	}
	n = skb->dev->mtu;
	n -= sizeof(struct aoe_hdr) + sizeof(struct aoe_atahdr);
	n /= 512;
	if (n > ch->scnt)
		n = ch->scnt;
	n = n ? n * 512 : DEFAULTBCNT;
	setifbcnt(t, skb->dev, n);

	/* don't change users' perspective */
	if (d->nopen == 0) {
		d->fw_ver = be16_to_cpu(ch->fwver);
		sl = aoecmd_ata_id(d);
	}
bail:
	spin_unlock_irqrestore(&d->lock, flags);
	aoedev_put(d);
	if (sl) {
		__skb_queue_head_init(&queue);
		__skb_queue_tail(&queue, sl);
		aoenet_xmit(&queue);
	}
}

void
aoecmd_wreset(struct aoetgt *t)
{
	t->maxout = 1;
	t->ssthresh = t->nframes / 2;
	t->next_cwnd = t->nframes;
}

void
aoecmd_cleanslate(struct aoedev *d)
{
	struct aoetgt **t, **te;

	d->rttavg = RTTAVG_INIT;
	d->rttdev = RTTDEV_INIT;
	d->maxbcnt = 0;

	t = d->targets;
	te = t + d->ntargets;
	for (; t < te && *t; t++)
		aoecmd_wreset(*t);
}

void
aoe_failbuf(struct aoedev *d, struct buf *buf)
{
	if (buf == NULL)
		return;
	buf->resid = 0;
	clear_bit(BIO_UPTODATE, &buf->bio->bi_flags);
	if (buf->nframesout == 0)
		aoe_end_buf(d, buf);
}

void
aoe_flush_iocq(void)
{
	struct frame *f;
	struct aoedev *d;
	LIST_HEAD(flist);
	struct list_head *pos;
	struct sk_buff *skb;
	ulong flags;

	spin_lock_irqsave(&iocq.lock, flags);
	list_splice_init(&iocq.head, &flist);
	spin_unlock_irqrestore(&iocq.lock, flags);
	while (!list_empty(&flist)) {
		pos = flist.next;
		list_del(pos);
		f = list_entry(pos, struct frame, head);
		d = f->t->d;
		skb = f->r_skb;
		spin_lock_irqsave(&d->lock, flags);
		if (f->buf) {
			f->buf->nframesout--;
			aoe_failbuf(d, f->buf);
		}
		aoe_freetframe(f);
		spin_unlock_irqrestore(&d->lock, flags);
		dev_kfree_skb(skb);
		aoedev_put(d);
	}
}

int __init
aoecmd_init(void)
{
	void *p;

	/* get_zeroed_page returns page with ref count 1 */
	p = (void *) get_zeroed_page(GFP_KERNEL | __GFP_REPEAT);
	if (!p)
		return -ENOMEM;
	empty_page = virt_to_page(p);

	INIT_LIST_HEAD(&iocq.head);
	spin_lock_init(&iocq.lock);
	init_waitqueue_head(&ktiowq);
	kts.name = "aoe_ktio";
	kts.fn = ktio;
	kts.waitq = &ktiowq;
	kts.lock = &iocq.lock;
	return aoe_ktstart(&kts);
}

void
aoecmd_exit(void)
{
	/* ... */
	free_page((unsigned long) page_address(empty_page));
	/* ... */
}
));