Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next-2.6
[linux-2.6/linux-acpi-2.6/ibm-acpi-2.6.git] / drivers / s390 / net / qeth_core_main.c
blob13ef46b9d388714abb3c72efaa1dcffc6f259a43
1 /*
2 * drivers/s390/net/qeth_core_main.c
4 * Copyright IBM Corp. 2007, 2009
5 * Author(s): Utz Bacher <utz.bacher@de.ibm.com>,
6 * Frank Pavlic <fpavlic@de.ibm.com>,
7 * Thomas Spatzier <tspat@de.ibm.com>,
8 * Frank Blaschka <frank.blaschka@de.ibm.com>
9 */
11 #define KMSG_COMPONENT "qeth"
12 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
14 #include <linux/module.h>
15 #include <linux/moduleparam.h>
16 #include <linux/string.h>
17 #include <linux/errno.h>
18 #include <linux/kernel.h>
19 #include <linux/ip.h>
20 #include <linux/tcp.h>
21 #include <linux/mii.h>
22 #include <linux/kthread.h>
23 #include <linux/slab.h>
25 #include <asm/ebcdic.h>
26 #include <asm/io.h>
28 #include "qeth_core.h"
30 struct qeth_dbf_info qeth_dbf[QETH_DBF_INFOS] = {
31 /* define dbf - Name, Pages, Areas, Maxlen, Level, View, Handle */
32 /* N P A M L V H */
33 [QETH_DBF_SETUP] = {"qeth_setup",
34 8, 1, 8, 5, &debug_hex_ascii_view, NULL},
35 [QETH_DBF_QERR] = {"qeth_qerr",
36 2, 1, 8, 2, &debug_hex_ascii_view, NULL},
37 [QETH_DBF_TRACE] = {"qeth_trace",
38 4, 1, 8, 3, &debug_hex_ascii_view, NULL},
39 [QETH_DBF_MSG] = {"qeth_msg",
40 8, 1, 128, 3, &debug_sprintf_view, NULL},
41 [QETH_DBF_SENSE] = {"qeth_sense",
42 2, 1, 64, 2, &debug_hex_ascii_view, NULL},
43 [QETH_DBF_MISC] = {"qeth_misc",
44 2, 1, 256, 2, &debug_hex_ascii_view, NULL},
45 [QETH_DBF_CTRL] = {"qeth_control",
46 8, 1, QETH_DBF_CTRL_LEN, 5, &debug_hex_ascii_view, NULL},
48 EXPORT_SYMBOL_GPL(qeth_dbf);
50 struct qeth_card_list_struct qeth_core_card_list;
51 EXPORT_SYMBOL_GPL(qeth_core_card_list);
52 struct kmem_cache *qeth_core_header_cache;
53 EXPORT_SYMBOL_GPL(qeth_core_header_cache);
55 static struct device *qeth_core_root_dev;
56 static unsigned int known_devices[][6] = QETH_MODELLIST_ARRAY;
57 static struct lock_class_key qdio_out_skb_queue_key;
59 static void qeth_send_control_data_cb(struct qeth_channel *,
60 struct qeth_cmd_buffer *);
61 static int qeth_issue_next_read(struct qeth_card *);
62 static struct qeth_cmd_buffer *qeth_get_buffer(struct qeth_channel *);
63 static void qeth_setup_ccw(struct qeth_channel *, unsigned char *, __u32);
64 static void qeth_free_buffer_pool(struct qeth_card *);
65 static int qeth_qdio_establish(struct qeth_card *);
68 static inline void __qeth_fill_buffer_frag(struct sk_buff *skb,
69 struct qdio_buffer *buffer, int is_tso,
70 int *next_element_to_fill)
72 struct skb_frag_struct *frag;
73 int fragno;
74 unsigned long addr;
75 int element, cnt, dlen;
77 fragno = skb_shinfo(skb)->nr_frags;
78 element = *next_element_to_fill;
79 dlen = 0;
81 if (is_tso)
82 buffer->element[element].flags =
83 SBAL_FLAGS_MIDDLE_FRAG;
84 else
85 buffer->element[element].flags =
86 SBAL_FLAGS_FIRST_FRAG;
87 dlen = skb->len - skb->data_len;
88 if (dlen) {
89 buffer->element[element].addr = skb->data;
90 buffer->element[element].length = dlen;
91 element++;
93 for (cnt = 0; cnt < fragno; cnt++) {
94 frag = &skb_shinfo(skb)->frags[cnt];
95 addr = (page_to_pfn(frag->page) << PAGE_SHIFT) +
96 frag->page_offset;
97 buffer->element[element].addr = (char *)addr;
98 buffer->element[element].length = frag->size;
99 if (cnt < (fragno - 1))
100 buffer->element[element].flags =
101 SBAL_FLAGS_MIDDLE_FRAG;
102 else
103 buffer->element[element].flags =
104 SBAL_FLAGS_LAST_FRAG;
105 element++;
107 *next_element_to_fill = element;
110 static inline const char *qeth_get_cardname(struct qeth_card *card)
112 if (card->info.guestlan) {
113 switch (card->info.type) {
114 case QETH_CARD_TYPE_OSD:
115 return " Guest LAN QDIO";
116 case QETH_CARD_TYPE_IQD:
117 return " Guest LAN Hiper";
118 case QETH_CARD_TYPE_OSM:
119 return " Guest LAN QDIO - OSM";
120 case QETH_CARD_TYPE_OSX:
121 return " Guest LAN QDIO - OSX";
122 default:
123 return " unknown";
125 } else {
126 switch (card->info.type) {
127 case QETH_CARD_TYPE_OSD:
128 return " OSD Express";
129 case QETH_CARD_TYPE_IQD:
130 return " HiperSockets";
131 case QETH_CARD_TYPE_OSN:
132 return " OSN QDIO";
133 case QETH_CARD_TYPE_OSM:
134 return " OSM QDIO";
135 case QETH_CARD_TYPE_OSX:
136 return " OSX QDIO";
137 default:
138 return " unknown";
141 return " n/a";
144 /* max length to be returned: 14 */
145 const char *qeth_get_cardname_short(struct qeth_card *card)
147 if (card->info.guestlan) {
148 switch (card->info.type) {
149 case QETH_CARD_TYPE_OSD:
150 return "GuestLAN QDIO";
151 case QETH_CARD_TYPE_IQD:
152 return "GuestLAN Hiper";
153 case QETH_CARD_TYPE_OSM:
154 return "GuestLAN OSM";
155 case QETH_CARD_TYPE_OSX:
156 return "GuestLAN OSX";
157 default:
158 return "unknown";
160 } else {
161 switch (card->info.type) {
162 case QETH_CARD_TYPE_OSD:
163 switch (card->info.link_type) {
164 case QETH_LINK_TYPE_FAST_ETH:
165 return "OSD_100";
166 case QETH_LINK_TYPE_HSTR:
167 return "HSTR";
168 case QETH_LINK_TYPE_GBIT_ETH:
169 return "OSD_1000";
170 case QETH_LINK_TYPE_10GBIT_ETH:
171 return "OSD_10GIG";
172 case QETH_LINK_TYPE_LANE_ETH100:
173 return "OSD_FE_LANE";
174 case QETH_LINK_TYPE_LANE_TR:
175 return "OSD_TR_LANE";
176 case QETH_LINK_TYPE_LANE_ETH1000:
177 return "OSD_GbE_LANE";
178 case QETH_LINK_TYPE_LANE:
179 return "OSD_ATM_LANE";
180 default:
181 return "OSD_Express";
183 case QETH_CARD_TYPE_IQD:
184 return "HiperSockets";
185 case QETH_CARD_TYPE_OSN:
186 return "OSN";
187 case QETH_CARD_TYPE_OSM:
188 return "OSM_1000";
189 case QETH_CARD_TYPE_OSX:
190 return "OSX_10GIG";
191 default:
192 return "unknown";
195 return "n/a";
198 void qeth_set_allowed_threads(struct qeth_card *card, unsigned long threads,
199 int clear_start_mask)
201 unsigned long flags;
203 spin_lock_irqsave(&card->thread_mask_lock, flags);
204 card->thread_allowed_mask = threads;
205 if (clear_start_mask)
206 card->thread_start_mask &= threads;
207 spin_unlock_irqrestore(&card->thread_mask_lock, flags);
208 wake_up(&card->wait_q);
210 EXPORT_SYMBOL_GPL(qeth_set_allowed_threads);
212 int qeth_threads_running(struct qeth_card *card, unsigned long threads)
214 unsigned long flags;
215 int rc = 0;
217 spin_lock_irqsave(&card->thread_mask_lock, flags);
218 rc = (card->thread_running_mask & threads);
219 spin_unlock_irqrestore(&card->thread_mask_lock, flags);
220 return rc;
222 EXPORT_SYMBOL_GPL(qeth_threads_running);
224 int qeth_wait_for_threads(struct qeth_card *card, unsigned long threads)
226 return wait_event_interruptible(card->wait_q,
227 qeth_threads_running(card, threads) == 0);
229 EXPORT_SYMBOL_GPL(qeth_wait_for_threads);
231 void qeth_clear_working_pool_list(struct qeth_card *card)
233 struct qeth_buffer_pool_entry *pool_entry, *tmp;
235 QETH_DBF_TEXT(TRACE, 5, "clwrklst");
236 list_for_each_entry_safe(pool_entry, tmp,
237 &card->qdio.in_buf_pool.entry_list, list){
238 list_del(&pool_entry->list);
241 EXPORT_SYMBOL_GPL(qeth_clear_working_pool_list);
243 static int qeth_alloc_buffer_pool(struct qeth_card *card)
245 struct qeth_buffer_pool_entry *pool_entry;
246 void *ptr;
247 int i, j;
249 QETH_DBF_TEXT(TRACE, 5, "alocpool");
250 for (i = 0; i < card->qdio.init_pool.buf_count; ++i) {
251 pool_entry = kmalloc(sizeof(*pool_entry), GFP_KERNEL);
252 if (!pool_entry) {
253 qeth_free_buffer_pool(card);
254 return -ENOMEM;
256 for (j = 0; j < QETH_MAX_BUFFER_ELEMENTS(card); ++j) {
257 ptr = (void *) __get_free_page(GFP_KERNEL);
258 if (!ptr) {
259 while (j > 0)
260 free_page((unsigned long)
261 pool_entry->elements[--j]);
262 kfree(pool_entry);
263 qeth_free_buffer_pool(card);
264 return -ENOMEM;
266 pool_entry->elements[j] = ptr;
268 list_add(&pool_entry->init_list,
269 &card->qdio.init_pool.entry_list);
271 return 0;
274 int qeth_realloc_buffer_pool(struct qeth_card *card, int bufcnt)
276 QETH_DBF_TEXT(TRACE, 2, "realcbp");
278 if ((card->state != CARD_STATE_DOWN) &&
279 (card->state != CARD_STATE_RECOVER))
280 return -EPERM;
282 /* TODO: steel/add buffers from/to a running card's buffer pool (?) */
283 qeth_clear_working_pool_list(card);
284 qeth_free_buffer_pool(card);
285 card->qdio.in_buf_pool.buf_count = bufcnt;
286 card->qdio.init_pool.buf_count = bufcnt;
287 return qeth_alloc_buffer_pool(card);
289 EXPORT_SYMBOL_GPL(qeth_realloc_buffer_pool);
291 static int qeth_issue_next_read(struct qeth_card *card)
293 int rc;
294 struct qeth_cmd_buffer *iob;
296 QETH_DBF_TEXT(TRACE, 5, "issnxrd");
297 if (card->read.state != CH_STATE_UP)
298 return -EIO;
299 iob = qeth_get_buffer(&card->read);
300 if (!iob) {
301 dev_warn(&card->gdev->dev, "The qeth device driver "
302 "failed to recover an error on the device\n");
303 QETH_DBF_MESSAGE(2, "%s issue_next_read failed: no iob "
304 "available\n", dev_name(&card->gdev->dev));
305 return -ENOMEM;
307 qeth_setup_ccw(&card->read, iob->data, QETH_BUFSIZE);
308 QETH_DBF_TEXT(TRACE, 6, "noirqpnd");
309 rc = ccw_device_start(card->read.ccwdev, &card->read.ccw,
310 (addr_t) iob, 0, 0);
311 if (rc) {
312 QETH_DBF_MESSAGE(2, "%s error in starting next read ccw! "
313 "rc=%i\n", dev_name(&card->gdev->dev), rc);
314 atomic_set(&card->read.irq_pending, 0);
315 qeth_schedule_recovery(card);
316 wake_up(&card->wait_q);
318 return rc;
321 static struct qeth_reply *qeth_alloc_reply(struct qeth_card *card)
323 struct qeth_reply *reply;
325 reply = kzalloc(sizeof(struct qeth_reply), GFP_ATOMIC);
326 if (reply) {
327 atomic_set(&reply->refcnt, 1);
328 atomic_set(&reply->received, 0);
329 reply->card = card;
331 return reply;
334 static void qeth_get_reply(struct qeth_reply *reply)
336 WARN_ON(atomic_read(&reply->refcnt) <= 0);
337 atomic_inc(&reply->refcnt);
340 static void qeth_put_reply(struct qeth_reply *reply)
342 WARN_ON(atomic_read(&reply->refcnt) <= 0);
343 if (atomic_dec_and_test(&reply->refcnt))
344 kfree(reply);
347 static void qeth_issue_ipa_msg(struct qeth_ipa_cmd *cmd, int rc,
348 struct qeth_card *card)
350 char *ipa_name;
351 int com = cmd->hdr.command;
352 ipa_name = qeth_get_ipa_cmd_name(com);
353 if (rc)
354 QETH_DBF_MESSAGE(2, "IPA: %s(x%X) for %s returned x%X \"%s\"\n",
355 ipa_name, com, QETH_CARD_IFNAME(card),
356 rc, qeth_get_ipa_msg(rc));
357 else
358 QETH_DBF_MESSAGE(5, "IPA: %s(x%X) for %s succeeded\n",
359 ipa_name, com, QETH_CARD_IFNAME(card));
362 static struct qeth_ipa_cmd *qeth_check_ipa_data(struct qeth_card *card,
363 struct qeth_cmd_buffer *iob)
365 struct qeth_ipa_cmd *cmd = NULL;
367 QETH_DBF_TEXT(TRACE, 5, "chkipad");
368 if (IS_IPA(iob->data)) {
369 cmd = (struct qeth_ipa_cmd *) PDU_ENCAPSULATION(iob->data);
370 if (IS_IPA_REPLY(cmd)) {
371 if (cmd->hdr.command != IPA_CMD_SETCCID &&
372 cmd->hdr.command != IPA_CMD_DELCCID &&
373 cmd->hdr.command != IPA_CMD_MODCCID &&
374 cmd->hdr.command != IPA_CMD_SET_DIAG_ASS)
375 qeth_issue_ipa_msg(cmd,
376 cmd->hdr.return_code, card);
377 return cmd;
378 } else {
379 switch (cmd->hdr.command) {
380 case IPA_CMD_STOPLAN:
381 dev_warn(&card->gdev->dev,
382 "The link for interface %s on CHPID"
383 " 0x%X failed\n",
384 QETH_CARD_IFNAME(card),
385 card->info.chpid);
386 card->lan_online = 0;
387 if (card->dev && netif_carrier_ok(card->dev))
388 netif_carrier_off(card->dev);
389 return NULL;
390 case IPA_CMD_STARTLAN:
391 dev_info(&card->gdev->dev,
392 "The link for %s on CHPID 0x%X has"
393 " been restored\n",
394 QETH_CARD_IFNAME(card),
395 card->info.chpid);
396 netif_carrier_on(card->dev);
397 card->lan_online = 1;
398 qeth_schedule_recovery(card);
399 return NULL;
400 case IPA_CMD_MODCCID:
401 return cmd;
402 case IPA_CMD_REGISTER_LOCAL_ADDR:
403 QETH_DBF_TEXT(TRACE, 3, "irla");
404 break;
405 case IPA_CMD_UNREGISTER_LOCAL_ADDR:
406 QETH_DBF_TEXT(TRACE, 3, "urla");
407 break;
408 default:
409 QETH_DBF_MESSAGE(2, "Received data is IPA "
410 "but not a reply!\n");
411 break;
415 return cmd;
418 void qeth_clear_ipacmd_list(struct qeth_card *card)
420 struct qeth_reply *reply, *r;
421 unsigned long flags;
423 QETH_DBF_TEXT(TRACE, 4, "clipalst");
425 spin_lock_irqsave(&card->lock, flags);
426 list_for_each_entry_safe(reply, r, &card->cmd_waiter_list, list) {
427 qeth_get_reply(reply);
428 reply->rc = -EIO;
429 atomic_inc(&reply->received);
430 list_del_init(&reply->list);
431 wake_up(&reply->wait_q);
432 qeth_put_reply(reply);
434 spin_unlock_irqrestore(&card->lock, flags);
436 EXPORT_SYMBOL_GPL(qeth_clear_ipacmd_list);
438 static int qeth_check_idx_response(struct qeth_card *card,
439 unsigned char *buffer)
441 if (!buffer)
442 return 0;
444 QETH_DBF_HEX(CTRL, 2, buffer, QETH_DBF_CTRL_LEN);
445 if ((buffer[2] & 0xc0) == 0xc0) {
446 QETH_DBF_MESSAGE(2, "received an IDX TERMINATE "
447 "with cause code 0x%02x%s\n",
448 buffer[4],
449 ((buffer[4] == 0x22) ?
450 " -- try another portname" : ""));
451 QETH_DBF_TEXT(TRACE, 2, "ckidxres");
452 QETH_DBF_TEXT(TRACE, 2, " idxterm");
453 QETH_DBF_TEXT_(TRACE, 2, " rc%d", -EIO);
454 if (buffer[4] == 0xf6) {
455 dev_err(&card->gdev->dev,
456 "The qeth device is not configured "
457 "for the OSI layer required by z/VM\n");
458 return -EPERM;
460 return -EIO;
462 return 0;
465 static void qeth_setup_ccw(struct qeth_channel *channel, unsigned char *iob,
466 __u32 len)
468 struct qeth_card *card;
470 QETH_DBF_TEXT(TRACE, 4, "setupccw");
471 card = CARD_FROM_CDEV(channel->ccwdev);
472 if (channel == &card->read)
473 memcpy(&channel->ccw, READ_CCW, sizeof(struct ccw1));
474 else
475 memcpy(&channel->ccw, WRITE_CCW, sizeof(struct ccw1));
476 channel->ccw.count = len;
477 channel->ccw.cda = (__u32) __pa(iob);
480 static struct qeth_cmd_buffer *__qeth_get_buffer(struct qeth_channel *channel)
482 __u8 index;
484 QETH_DBF_TEXT(TRACE, 6, "getbuff");
485 index = channel->io_buf_no;
486 do {
487 if (channel->iob[index].state == BUF_STATE_FREE) {
488 channel->iob[index].state = BUF_STATE_LOCKED;
489 channel->io_buf_no = (channel->io_buf_no + 1) %
490 QETH_CMD_BUFFER_NO;
491 memset(channel->iob[index].data, 0, QETH_BUFSIZE);
492 return channel->iob + index;
494 index = (index + 1) % QETH_CMD_BUFFER_NO;
495 } while (index != channel->io_buf_no);
497 return NULL;
500 void qeth_release_buffer(struct qeth_channel *channel,
501 struct qeth_cmd_buffer *iob)
503 unsigned long flags;
505 QETH_DBF_TEXT(TRACE, 6, "relbuff");
506 spin_lock_irqsave(&channel->iob_lock, flags);
507 memset(iob->data, 0, QETH_BUFSIZE);
508 iob->state = BUF_STATE_FREE;
509 iob->callback = qeth_send_control_data_cb;
510 iob->rc = 0;
511 spin_unlock_irqrestore(&channel->iob_lock, flags);
513 EXPORT_SYMBOL_GPL(qeth_release_buffer);
515 static struct qeth_cmd_buffer *qeth_get_buffer(struct qeth_channel *channel)
517 struct qeth_cmd_buffer *buffer = NULL;
518 unsigned long flags;
520 spin_lock_irqsave(&channel->iob_lock, flags);
521 buffer = __qeth_get_buffer(channel);
522 spin_unlock_irqrestore(&channel->iob_lock, flags);
523 return buffer;
526 struct qeth_cmd_buffer *qeth_wait_for_buffer(struct qeth_channel *channel)
528 struct qeth_cmd_buffer *buffer;
529 wait_event(channel->wait_q,
530 ((buffer = qeth_get_buffer(channel)) != NULL));
531 return buffer;
533 EXPORT_SYMBOL_GPL(qeth_wait_for_buffer);
535 void qeth_clear_cmd_buffers(struct qeth_channel *channel)
537 int cnt;
539 for (cnt = 0; cnt < QETH_CMD_BUFFER_NO; cnt++)
540 qeth_release_buffer(channel, &channel->iob[cnt]);
541 channel->buf_no = 0;
542 channel->io_buf_no = 0;
544 EXPORT_SYMBOL_GPL(qeth_clear_cmd_buffers);
546 static void qeth_send_control_data_cb(struct qeth_channel *channel,
547 struct qeth_cmd_buffer *iob)
549 struct qeth_card *card;
550 struct qeth_reply *reply, *r;
551 struct qeth_ipa_cmd *cmd;
552 unsigned long flags;
553 int keep_reply;
554 int rc = 0;
556 QETH_DBF_TEXT(TRACE, 4, "sndctlcb");
558 card = CARD_FROM_CDEV(channel->ccwdev);
559 rc = qeth_check_idx_response(card, iob->data);
560 switch (rc) {
561 case 0:
562 break;
563 case -EIO:
564 qeth_clear_ipacmd_list(card);
565 qeth_schedule_recovery(card);
566 default:
567 goto out;
570 cmd = qeth_check_ipa_data(card, iob);
571 if ((cmd == NULL) && (card->state != CARD_STATE_DOWN))
572 goto out;
573 /*in case of OSN : check if cmd is set */
574 if (card->info.type == QETH_CARD_TYPE_OSN &&
575 cmd &&
576 cmd->hdr.command != IPA_CMD_STARTLAN &&
577 card->osn_info.assist_cb != NULL) {
578 card->osn_info.assist_cb(card->dev, cmd);
579 goto out;
582 spin_lock_irqsave(&card->lock, flags);
583 list_for_each_entry_safe(reply, r, &card->cmd_waiter_list, list) {
584 if ((reply->seqno == QETH_IDX_COMMAND_SEQNO) ||
585 ((cmd) && (reply->seqno == cmd->hdr.seqno))) {
586 qeth_get_reply(reply);
587 list_del_init(&reply->list);
588 spin_unlock_irqrestore(&card->lock, flags);
589 keep_reply = 0;
590 if (reply->callback != NULL) {
591 if (cmd) {
592 reply->offset = (__u16)((char *)cmd -
593 (char *)iob->data);
594 keep_reply = reply->callback(card,
595 reply,
596 (unsigned long)cmd);
597 } else
598 keep_reply = reply->callback(card,
599 reply,
600 (unsigned long)iob);
602 if (cmd)
603 reply->rc = (u16) cmd->hdr.return_code;
604 else if (iob->rc)
605 reply->rc = iob->rc;
606 if (keep_reply) {
607 spin_lock_irqsave(&card->lock, flags);
608 list_add_tail(&reply->list,
609 &card->cmd_waiter_list);
610 spin_unlock_irqrestore(&card->lock, flags);
611 } else {
612 atomic_inc(&reply->received);
613 wake_up(&reply->wait_q);
615 qeth_put_reply(reply);
616 goto out;
619 spin_unlock_irqrestore(&card->lock, flags);
620 out:
621 memcpy(&card->seqno.pdu_hdr_ack,
622 QETH_PDU_HEADER_SEQ_NO(iob->data),
623 QETH_SEQ_NO_LENGTH);
624 qeth_release_buffer(channel, iob);
627 static int qeth_setup_channel(struct qeth_channel *channel)
629 int cnt;
631 QETH_DBF_TEXT(SETUP, 2, "setupch");
632 for (cnt = 0; cnt < QETH_CMD_BUFFER_NO; cnt++) {
633 channel->iob[cnt].data =
634 kmalloc(QETH_BUFSIZE, GFP_DMA|GFP_KERNEL);
635 if (channel->iob[cnt].data == NULL)
636 break;
637 channel->iob[cnt].state = BUF_STATE_FREE;
638 channel->iob[cnt].channel = channel;
639 channel->iob[cnt].callback = qeth_send_control_data_cb;
640 channel->iob[cnt].rc = 0;
642 if (cnt < QETH_CMD_BUFFER_NO) {
643 while (cnt-- > 0)
644 kfree(channel->iob[cnt].data);
645 return -ENOMEM;
647 channel->buf_no = 0;
648 channel->io_buf_no = 0;
649 atomic_set(&channel->irq_pending, 0);
650 spin_lock_init(&channel->iob_lock);
652 init_waitqueue_head(&channel->wait_q);
653 return 0;
656 static int qeth_set_thread_start_bit(struct qeth_card *card,
657 unsigned long thread)
659 unsigned long flags;
661 spin_lock_irqsave(&card->thread_mask_lock, flags);
662 if (!(card->thread_allowed_mask & thread) ||
663 (card->thread_start_mask & thread)) {
664 spin_unlock_irqrestore(&card->thread_mask_lock, flags);
665 return -EPERM;
667 card->thread_start_mask |= thread;
668 spin_unlock_irqrestore(&card->thread_mask_lock, flags);
669 return 0;
672 void qeth_clear_thread_start_bit(struct qeth_card *card, unsigned long thread)
674 unsigned long flags;
676 spin_lock_irqsave(&card->thread_mask_lock, flags);
677 card->thread_start_mask &= ~thread;
678 spin_unlock_irqrestore(&card->thread_mask_lock, flags);
679 wake_up(&card->wait_q);
681 EXPORT_SYMBOL_GPL(qeth_clear_thread_start_bit);
683 void qeth_clear_thread_running_bit(struct qeth_card *card, unsigned long thread)
685 unsigned long flags;
687 spin_lock_irqsave(&card->thread_mask_lock, flags);
688 card->thread_running_mask &= ~thread;
689 spin_unlock_irqrestore(&card->thread_mask_lock, flags);
690 wake_up(&card->wait_q);
692 EXPORT_SYMBOL_GPL(qeth_clear_thread_running_bit);
694 static int __qeth_do_run_thread(struct qeth_card *card, unsigned long thread)
696 unsigned long flags;
697 int rc = 0;
699 spin_lock_irqsave(&card->thread_mask_lock, flags);
700 if (card->thread_start_mask & thread) {
701 if ((card->thread_allowed_mask & thread) &&
702 !(card->thread_running_mask & thread)) {
703 rc = 1;
704 card->thread_start_mask &= ~thread;
705 card->thread_running_mask |= thread;
706 } else
707 rc = -EPERM;
709 spin_unlock_irqrestore(&card->thread_mask_lock, flags);
710 return rc;
713 int qeth_do_run_thread(struct qeth_card *card, unsigned long thread)
715 int rc = 0;
717 wait_event(card->wait_q,
718 (rc = __qeth_do_run_thread(card, thread)) >= 0);
719 return rc;
721 EXPORT_SYMBOL_GPL(qeth_do_run_thread);
723 void qeth_schedule_recovery(struct qeth_card *card)
725 QETH_DBF_TEXT(TRACE, 2, "startrec");
726 if (qeth_set_thread_start_bit(card, QETH_RECOVER_THREAD) == 0)
727 schedule_work(&card->kernel_thread_starter);
729 EXPORT_SYMBOL_GPL(qeth_schedule_recovery);
731 static int qeth_get_problem(struct ccw_device *cdev, struct irb *irb)
733 int dstat, cstat;
734 char *sense;
736 sense = (char *) irb->ecw;
737 cstat = irb->scsw.cmd.cstat;
738 dstat = irb->scsw.cmd.dstat;
740 if (cstat & (SCHN_STAT_CHN_CTRL_CHK | SCHN_STAT_INTF_CTRL_CHK |
741 SCHN_STAT_CHN_DATA_CHK | SCHN_STAT_CHAIN_CHECK |
742 SCHN_STAT_PROT_CHECK | SCHN_STAT_PROG_CHECK)) {
743 QETH_DBF_TEXT(TRACE, 2, "CGENCHK");
744 dev_warn(&cdev->dev, "The qeth device driver "
745 "failed to recover an error on the device\n");
746 QETH_DBF_MESSAGE(2, "%s check on device dstat=x%x, cstat=x%x\n",
747 dev_name(&cdev->dev), dstat, cstat);
748 print_hex_dump(KERN_WARNING, "qeth: irb ", DUMP_PREFIX_OFFSET,
749 16, 1, irb, 64, 1);
750 return 1;
753 if (dstat & DEV_STAT_UNIT_CHECK) {
754 if (sense[SENSE_RESETTING_EVENT_BYTE] &
755 SENSE_RESETTING_EVENT_FLAG) {
756 QETH_DBF_TEXT(TRACE, 2, "REVIND");
757 return 1;
759 if (sense[SENSE_COMMAND_REJECT_BYTE] &
760 SENSE_COMMAND_REJECT_FLAG) {
761 QETH_DBF_TEXT(TRACE, 2, "CMDREJi");
762 return 1;
764 if ((sense[2] == 0xaf) && (sense[3] == 0xfe)) {
765 QETH_DBF_TEXT(TRACE, 2, "AFFE");
766 return 1;
768 if ((!sense[0]) && (!sense[1]) && (!sense[2]) && (!sense[3])) {
769 QETH_DBF_TEXT(TRACE, 2, "ZEROSEN");
770 return 0;
772 QETH_DBF_TEXT(TRACE, 2, "DGENCHK");
773 return 1;
775 return 0;
778 static long __qeth_check_irb_error(struct ccw_device *cdev,
779 unsigned long intparm, struct irb *irb)
781 if (!IS_ERR(irb))
782 return 0;
784 switch (PTR_ERR(irb)) {
785 case -EIO:
786 QETH_DBF_MESSAGE(2, "%s i/o-error on device\n",
787 dev_name(&cdev->dev));
788 QETH_DBF_TEXT(TRACE, 2, "ckirberr");
789 QETH_DBF_TEXT_(TRACE, 2, " rc%d", -EIO);
790 break;
791 case -ETIMEDOUT:
792 dev_warn(&cdev->dev, "A hardware operation timed out"
793 " on the device\n");
794 QETH_DBF_TEXT(TRACE, 2, "ckirberr");
795 QETH_DBF_TEXT_(TRACE, 2, " rc%d", -ETIMEDOUT);
796 if (intparm == QETH_RCD_PARM) {
797 struct qeth_card *card = CARD_FROM_CDEV(cdev);
799 if (card && (card->data.ccwdev == cdev)) {
800 card->data.state = CH_STATE_DOWN;
801 wake_up(&card->wait_q);
804 break;
805 default:
806 QETH_DBF_MESSAGE(2, "%s unknown error %ld on device\n",
807 dev_name(&cdev->dev), PTR_ERR(irb));
808 QETH_DBF_TEXT(TRACE, 2, "ckirberr");
809 QETH_DBF_TEXT(TRACE, 2, " rc???");
811 return PTR_ERR(irb);
814 static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
815 struct irb *irb)
817 int rc;
818 int cstat, dstat;
819 struct qeth_cmd_buffer *buffer;
820 struct qeth_channel *channel;
821 struct qeth_card *card;
822 struct qeth_cmd_buffer *iob;
823 __u8 index;
825 QETH_DBF_TEXT(TRACE, 5, "irq");
827 if (__qeth_check_irb_error(cdev, intparm, irb))
828 return;
829 cstat = irb->scsw.cmd.cstat;
830 dstat = irb->scsw.cmd.dstat;
832 card = CARD_FROM_CDEV(cdev);
833 if (!card)
834 return;
836 if (card->read.ccwdev == cdev) {
837 channel = &card->read;
838 QETH_DBF_TEXT(TRACE, 5, "read");
839 } else if (card->write.ccwdev == cdev) {
840 channel = &card->write;
841 QETH_DBF_TEXT(TRACE, 5, "write");
842 } else {
843 channel = &card->data;
844 QETH_DBF_TEXT(TRACE, 5, "data");
846 atomic_set(&channel->irq_pending, 0);
848 if (irb->scsw.cmd.fctl & (SCSW_FCTL_CLEAR_FUNC))
849 channel->state = CH_STATE_STOPPED;
851 if (irb->scsw.cmd.fctl & (SCSW_FCTL_HALT_FUNC))
852 channel->state = CH_STATE_HALTED;
854 /*let's wake up immediately on data channel*/
855 if ((channel == &card->data) && (intparm != 0) &&
856 (intparm != QETH_RCD_PARM))
857 goto out;
859 if (intparm == QETH_CLEAR_CHANNEL_PARM) {
860 QETH_DBF_TEXT(TRACE, 6, "clrchpar");
861 /* we don't have to handle this further */
862 intparm = 0;
864 if (intparm == QETH_HALT_CHANNEL_PARM) {
865 QETH_DBF_TEXT(TRACE, 6, "hltchpar");
866 /* we don't have to handle this further */
867 intparm = 0;
869 if ((dstat & DEV_STAT_UNIT_EXCEP) ||
870 (dstat & DEV_STAT_UNIT_CHECK) ||
871 (cstat)) {
872 if (irb->esw.esw0.erw.cons) {
873 dev_warn(&channel->ccwdev->dev,
874 "The qeth device driver failed to recover "
875 "an error on the device\n");
876 QETH_DBF_MESSAGE(2, "%s sense data available. cstat "
877 "0x%X dstat 0x%X\n",
878 dev_name(&channel->ccwdev->dev), cstat, dstat);
879 print_hex_dump(KERN_WARNING, "qeth: irb ",
880 DUMP_PREFIX_OFFSET, 16, 1, irb, 32, 1);
881 print_hex_dump(KERN_WARNING, "qeth: sense data ",
882 DUMP_PREFIX_OFFSET, 16, 1, irb->ecw, 32, 1);
884 if (intparm == QETH_RCD_PARM) {
885 channel->state = CH_STATE_DOWN;
886 goto out;
888 rc = qeth_get_problem(cdev, irb);
889 if (rc) {
890 qeth_clear_ipacmd_list(card);
891 qeth_schedule_recovery(card);
892 goto out;
896 if (intparm == QETH_RCD_PARM) {
897 channel->state = CH_STATE_RCD_DONE;
898 goto out;
900 if (intparm) {
901 buffer = (struct qeth_cmd_buffer *) __va((addr_t)intparm);
902 buffer->state = BUF_STATE_PROCESSED;
904 if (channel == &card->data)
905 return;
906 if (channel == &card->read &&
907 channel->state == CH_STATE_UP)
908 qeth_issue_next_read(card);
910 iob = channel->iob;
911 index = channel->buf_no;
912 while (iob[index].state == BUF_STATE_PROCESSED) {
913 if (iob[index].callback != NULL)
914 iob[index].callback(channel, iob + index);
916 index = (index + 1) % QETH_CMD_BUFFER_NO;
918 channel->buf_no = index;
919 out:
920 wake_up(&card->wait_q);
921 return;
924 static void __qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
925 struct qeth_qdio_out_buffer *buf, unsigned int qeth_skip_skb)
927 int i;
928 struct sk_buff *skb;
930 /* is PCI flag set on buffer? */
931 if (buf->buffer->element[0].flags & 0x40)
932 atomic_dec(&queue->set_pci_flags_count);
934 if (!qeth_skip_skb) {
935 skb = skb_dequeue(&buf->skb_list);
936 while (skb) {
937 atomic_dec(&skb->users);
938 dev_kfree_skb_any(skb);
939 skb = skb_dequeue(&buf->skb_list);
942 for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(queue->card); ++i) {
943 if (buf->buffer->element[i].addr && buf->is_header[i])
944 kmem_cache_free(qeth_core_header_cache,
945 buf->buffer->element[i].addr);
946 buf->is_header[i] = 0;
947 buf->buffer->element[i].length = 0;
948 buf->buffer->element[i].addr = NULL;
949 buf->buffer->element[i].flags = 0;
951 buf->buffer->element[15].flags = 0;
952 buf->next_element_to_fill = 0;
953 atomic_set(&buf->state, QETH_QDIO_BUF_EMPTY);
/* Convenience wrapper: clear an outbound buffer including its skbs. */
static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
		struct qeth_qdio_out_buffer *buf)
{
	__qeth_clear_output_buffer(queue, buf, 0);
}
962 void qeth_clear_qdio_buffers(struct qeth_card *card)
964 int i, j;
966 QETH_DBF_TEXT(TRACE, 2, "clearqdbf");
967 /* clear outbound buffers to free skbs */
968 for (i = 0; i < card->qdio.no_out_queues; ++i)
969 if (card->qdio.out_qs[i]) {
970 for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j)
971 qeth_clear_output_buffer(card->qdio.out_qs[i],
972 &card->qdio.out_qs[i]->bufs[j]);
975 EXPORT_SYMBOL_GPL(qeth_clear_qdio_buffers);
977 static void qeth_free_buffer_pool(struct qeth_card *card)
979 struct qeth_buffer_pool_entry *pool_entry, *tmp;
980 int i = 0;
981 QETH_DBF_TEXT(TRACE, 5, "freepool");
982 list_for_each_entry_safe(pool_entry, tmp,
983 &card->qdio.init_pool.entry_list, init_list){
984 for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i)
985 free_page((unsigned long)pool_entry->elements[i]);
986 list_del(&pool_entry->init_list);
987 kfree(pool_entry);
991 static void qeth_free_qdio_buffers(struct qeth_card *card)
993 int i, j;
995 QETH_DBF_TEXT(TRACE, 2, "freeqdbf");
996 if (atomic_xchg(&card->qdio.state, QETH_QDIO_UNINITIALIZED) ==
997 QETH_QDIO_UNINITIALIZED)
998 return;
999 kfree(card->qdio.in_q);
1000 card->qdio.in_q = NULL;
1001 /* inbound buffer pool */
1002 qeth_free_buffer_pool(card);
1003 /* free outbound qdio_qs */
1004 if (card->qdio.out_qs) {
1005 for (i = 0; i < card->qdio.no_out_queues; ++i) {
1006 for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j)
1007 qeth_clear_output_buffer(card->qdio.out_qs[i],
1008 &card->qdio.out_qs[i]->bufs[j]);
1009 kfree(card->qdio.out_qs[i]);
1011 kfree(card->qdio.out_qs);
1012 card->qdio.out_qs = NULL;
1016 static void qeth_clean_channel(struct qeth_channel *channel)
1018 int cnt;
1020 QETH_DBF_TEXT(SETUP, 2, "freech");
1021 for (cnt = 0; cnt < QETH_CMD_BUFFER_NO; cnt++)
1022 kfree(channel->iob[cnt].data);
1025 static void qeth_get_channel_path_desc(struct qeth_card *card)
1027 struct ccw_device *ccwdev;
1028 struct channelPath_dsc {
1029 u8 flags;
1030 u8 lsn;
1031 u8 desc;
1032 u8 chpid;
1033 u8 swla;
1034 u8 zeroes;
1035 u8 chla;
1036 u8 chpp;
1037 } *chp_dsc;
1039 QETH_DBF_TEXT(SETUP, 2, "chp_desc");
1041 ccwdev = card->data.ccwdev;
1042 chp_dsc = (struct channelPath_dsc *)ccw_device_get_chp_desc(ccwdev, 0);
1043 if (chp_dsc != NULL) {
1044 /* CHPP field bit 6 == 1 -> single queue */
1045 if ((chp_dsc->chpp & 0x02) == 0x02)
1046 card->qdio.no_out_queues = 1;
1047 card->info.func_level = 0x4100 + chp_dsc->desc;
1048 kfree(chp_dsc);
1050 if (card->qdio.no_out_queues == 1) {
1051 card->qdio.default_out_queue = 0;
1052 dev_info(&card->gdev->dev,
1053 "Priority Queueing not supported\n");
1055 QETH_DBF_TEXT_(SETUP, 2, "nr:%x", card->qdio.no_out_queues);
1056 QETH_DBF_TEXT_(SETUP, 2, "lvl:%02x", card->info.func_level);
1057 return;
1060 static void qeth_init_qdio_info(struct qeth_card *card)
1062 QETH_DBF_TEXT(SETUP, 4, "intqdinf");
1063 atomic_set(&card->qdio.state, QETH_QDIO_UNINITIALIZED);
1064 /* inbound */
1065 card->qdio.in_buf_size = QETH_IN_BUF_SIZE_DEFAULT;
1066 card->qdio.init_pool.buf_count = QETH_IN_BUF_COUNT_DEFAULT;
1067 card->qdio.in_buf_pool.buf_count = card->qdio.init_pool.buf_count;
1068 INIT_LIST_HEAD(&card->qdio.in_buf_pool.entry_list);
1069 INIT_LIST_HEAD(&card->qdio.init_pool.entry_list);
1072 static void qeth_set_intial_options(struct qeth_card *card)
1074 card->options.route4.type = NO_ROUTER;
1075 card->options.route6.type = NO_ROUTER;
1076 card->options.checksum_type = QETH_CHECKSUM_DEFAULT;
1077 card->options.broadcast_mode = QETH_TR_BROADCAST_ALLRINGS;
1078 card->options.macaddr_mode = QETH_TR_MACADDR_NONCANONICAL;
1079 card->options.fake_broadcast = 0;
1080 card->options.add_hhlen = DEFAULT_ADD_HHLEN;
1081 card->options.performance_stats = 0;
1082 card->options.rx_sg_cb = QETH_RX_SG_CB;
1083 card->options.isolation = ISOLATION_MODE_NONE;
1086 static int qeth_do_start_thread(struct qeth_card *card, unsigned long thread)
1088 unsigned long flags;
1089 int rc = 0;
1091 spin_lock_irqsave(&card->thread_mask_lock, flags);
1092 QETH_DBF_TEXT_(TRACE, 4, " %02x%02x%02x",
1093 (u8) card->thread_start_mask,
1094 (u8) card->thread_allowed_mask,
1095 (u8) card->thread_running_mask);
1096 rc = (card->thread_start_mask & thread);
1097 spin_unlock_irqrestore(&card->thread_mask_lock, flags);
1098 return rc;
1101 static void qeth_start_kernel_thread(struct work_struct *work)
1103 struct qeth_card *card = container_of(work, struct qeth_card,
1104 kernel_thread_starter);
1105 QETH_DBF_TEXT(TRACE , 2, "strthrd");
1107 if (card->read.state != CH_STATE_UP &&
1108 card->write.state != CH_STATE_UP)
1109 return;
1110 if (qeth_do_start_thread(card, QETH_RECOVER_THREAD))
1111 kthread_run(card->discipline.recover, (void *) card,
1112 "qeth_recover");
/* Initialize a freshly allocated qeth_card to its offline default state:
 * channel/card states, locks, thread masks, list heads and default options.
 * Always returns 0.
 */
static int qeth_setup_card(struct qeth_card *card)
{
	QETH_DBF_TEXT(SETUP, 2, "setupcrd");
	QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));

	/* all channels and the card itself start out down/offline */
	card->read.state = CH_STATE_DOWN;
	card->write.state = CH_STATE_DOWN;
	card->data.state = CH_STATE_DOWN;
	card->state = CARD_STATE_DOWN;
	card->lan_online = 0;
	card->use_hard_stop = 0;
	card->dev = NULL;
	spin_lock_init(&card->vlanlock);
	spin_lock_init(&card->mclock);
	card->vlangrp = NULL;
	spin_lock_init(&card->lock);
	spin_lock_init(&card->ip_lock);
	spin_lock_init(&card->thread_mask_lock);
	mutex_init(&card->conf_mutex);
	/* no threads requested, allowed or running yet */
	card->thread_start_mask = 0;
	card->thread_allowed_mask = 0;
	card->thread_running_mask = 0;
	INIT_WORK(&card->kernel_thread_starter, qeth_start_kernel_thread);
	INIT_LIST_HEAD(&card->ip_list);
	/* ip_tbd_list itself was allocated in qeth_alloc_card() */
	INIT_LIST_HEAD(card->ip_tbd_list);
	INIT_LIST_HEAD(&card->cmd_waiter_list);
	init_waitqueue_head(&card->wait_q);
	/* initial options */
	qeth_set_intial_options(card);
	/* IP address takeover */
	INIT_LIST_HEAD(&card->ipato.entries);
	card->ipato.enabled = 0;
	card->ipato.invert4 = 0;
	card->ipato.invert6 = 0;
	/* init QDIO stuff */
	qeth_init_qdio_info(card);
	return 0;
}
1155 static void qeth_core_sl_print(struct seq_file *m, struct service_level *slr)
1157 struct qeth_card *card = container_of(slr, struct qeth_card,
1158 qeth_service_level);
1159 if (card->info.mcl_level[0])
1160 seq_printf(m, "qeth: %s firmware level %s\n",
1161 CARD_BUS_ID(card), card->info.mcl_level);
/* Allocate and minimally populate a qeth_card: the card structure itself
 * (DMA-capable memory), the to-be-done IP list head, and both control
 * channels; finally register the service-level callback.
 * Returns the new card, or NULL on any allocation failure (all partial
 * allocations are released via the goto cleanup chain).
 */
static struct qeth_card *qeth_alloc_card(void)
{
	struct qeth_card *card;

	QETH_DBF_TEXT(SETUP, 2, "alloccrd");
	card = kzalloc(sizeof(struct qeth_card), GFP_DMA|GFP_KERNEL);
	if (!card)
		goto out;
	QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));
	card->ip_tbd_list = kmalloc(sizeof(struct list_head), GFP_KERNEL);
	if (!card->ip_tbd_list) {
		QETH_DBF_TEXT(SETUP, 0, "iptbdnom");
		goto out_card;
	}
	if (qeth_setup_channel(&card->read))
		goto out_ip;
	if (qeth_setup_channel(&card->write))
		goto out_channel;
	/* layer2 = -1 means "not decided yet" until the user/discipline sets it */
	card->options.layer2 = -1;
	card->qeth_service_level.seq_print = qeth_core_sl_print;
	register_service_level(&card->qeth_service_level);
	return card;

out_channel:
	qeth_clean_channel(&card->read);
out_ip:
	kfree(card->ip_tbd_list);
out_card:
	kfree(card);
out:
	return NULL;
}
1197 static int qeth_determine_card_type(struct qeth_card *card)
1199 int i = 0;
1201 QETH_DBF_TEXT(SETUP, 2, "detcdtyp");
1203 card->qdio.do_prio_queueing = QETH_PRIOQ_DEFAULT;
1204 card->qdio.default_out_queue = QETH_DEFAULT_QUEUE;
1205 while (known_devices[i][QETH_DEV_MODEL_IND]) {
1206 if ((CARD_RDEV(card)->id.dev_type ==
1207 known_devices[i][QETH_DEV_TYPE_IND]) &&
1208 (CARD_RDEV(card)->id.dev_model ==
1209 known_devices[i][QETH_DEV_MODEL_IND])) {
1210 card->info.type = known_devices[i][QETH_DEV_MODEL_IND];
1211 card->qdio.no_out_queues =
1212 known_devices[i][QETH_QUEUE_NO_IND];
1213 card->info.is_multicast_different =
1214 known_devices[i][QETH_MULTICAST_IND];
1215 qeth_get_channel_path_desc(card);
1216 return 0;
1218 i++;
1220 card->info.type = QETH_CARD_TYPE_UNKNOWN;
1221 dev_err(&card->gdev->dev, "The adapter hardware is of an "
1222 "unknown type\n");
1223 return -ENOENT;
1226 static int qeth_clear_channel(struct qeth_channel *channel)
1228 unsigned long flags;
1229 struct qeth_card *card;
1230 int rc;
1232 QETH_DBF_TEXT(TRACE, 3, "clearch");
1233 card = CARD_FROM_CDEV(channel->ccwdev);
1234 spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
1235 rc = ccw_device_clear(channel->ccwdev, QETH_CLEAR_CHANNEL_PARM);
1236 spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
1238 if (rc)
1239 return rc;
1240 rc = wait_event_interruptible_timeout(card->wait_q,
1241 channel->state == CH_STATE_STOPPED, QETH_TIMEOUT);
1242 if (rc == -ERESTARTSYS)
1243 return rc;
1244 if (channel->state != CH_STATE_STOPPED)
1245 return -ETIME;
1246 channel->state = CH_STATE_DOWN;
1247 return 0;
1250 static int qeth_halt_channel(struct qeth_channel *channel)
1252 unsigned long flags;
1253 struct qeth_card *card;
1254 int rc;
1256 QETH_DBF_TEXT(TRACE, 3, "haltch");
1257 card = CARD_FROM_CDEV(channel->ccwdev);
1258 spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
1259 rc = ccw_device_halt(channel->ccwdev, QETH_HALT_CHANNEL_PARM);
1260 spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
1262 if (rc)
1263 return rc;
1264 rc = wait_event_interruptible_timeout(card->wait_q,
1265 channel->state == CH_STATE_HALTED, QETH_TIMEOUT);
1266 if (rc == -ERESTARTSYS)
1267 return rc;
1268 if (channel->state != CH_STATE_HALTED)
1269 return -ETIME;
1270 return 0;
1273 static int qeth_halt_channels(struct qeth_card *card)
1275 int rc1 = 0, rc2 = 0, rc3 = 0;
1277 QETH_DBF_TEXT(TRACE, 3, "haltchs");
1278 rc1 = qeth_halt_channel(&card->read);
1279 rc2 = qeth_halt_channel(&card->write);
1280 rc3 = qeth_halt_channel(&card->data);
1281 if (rc1)
1282 return rc1;
1283 if (rc2)
1284 return rc2;
1285 return rc3;
1288 static int qeth_clear_channels(struct qeth_card *card)
1290 int rc1 = 0, rc2 = 0, rc3 = 0;
1292 QETH_DBF_TEXT(TRACE, 3, "clearchs");
1293 rc1 = qeth_clear_channel(&card->read);
1294 rc2 = qeth_clear_channel(&card->write);
1295 rc3 = qeth_clear_channel(&card->data);
1296 if (rc1)
1297 return rc1;
1298 if (rc2)
1299 return rc2;
1300 return rc3;
1303 static int qeth_clear_halt_card(struct qeth_card *card, int halt)
1305 int rc = 0;
1307 QETH_DBF_TEXT(TRACE, 3, "clhacrd");
1308 QETH_DBF_HEX(TRACE, 3, &card, sizeof(void *));
1310 if (halt)
1311 rc = qeth_halt_channels(card);
1312 if (rc)
1313 return rc;
1314 return qeth_clear_channels(card);
/* Tear down the QDIO data connection of @card and (optionally after a
 * halt) clear its channels, leaving the card in CARD_STATE_DOWN.
 * The atomic cmpxchg on qdio.state guarantees only one caller performs
 * the shutdown; a concurrent caller that observes QETH_QDIO_CLEANING
 * simply returns 0.  Returns 0 or the first error encountered.
 */
int qeth_qdio_clear_card(struct qeth_card *card, int use_halt)
{
	int rc = 0;

	QETH_DBF_TEXT(TRACE, 3, "qdioclr");
	switch (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_ESTABLISHED,
		QETH_QDIO_CLEANING)) {
	case QETH_QDIO_ESTABLISHED:
		/* IQD devices are shut down with HALT, others with CLEAR */
		if (card->info.type == QETH_CARD_TYPE_IQD)
			rc = qdio_shutdown(CARD_DDEV(card),
				QDIO_FLAG_CLEANUP_USING_HALT);
		else
			rc = qdio_shutdown(CARD_DDEV(card),
				QDIO_FLAG_CLEANUP_USING_CLEAR);
		if (rc)
			QETH_DBF_TEXT_(TRACE, 3, "1err%d", rc);
		qdio_free(CARD_DDEV(card));
		atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED);
		break;
	case QETH_QDIO_CLEANING:
		/* someone else is already cleaning up */
		return rc;
	default:
		break;
	}
	rc = qeth_clear_halt_card(card, use_halt);
	if (rc)
		QETH_DBF_TEXT_(TRACE, 3, "2err%d", rc);
	card->state = CARD_STATE_DOWN;
	return rc;
}
EXPORT_SYMBOL_GPL(qeth_qdio_clear_card);
/* Read the device's configuration data (RCD) via the data channel.
 * On success *buffer points to a kzalloc'd buffer of *length bytes that
 * the caller must kfree; on failure *buffer is NULL and *length is 0.
 * Returns 0, -EOPNOTSUPP if the device offers no RCD command, -ENOMEM,
 * -EIO if the channel dropped to DOWN, or a ccw start error.
 */
static int qeth_read_conf_data(struct qeth_card *card, void **buffer,
		int *length)
{
	struct ciw *ciw;
	char *rcd_buf;
	int ret;
	struct qeth_channel *channel = &card->data;
	unsigned long flags;

	/*
	 * scan for RCD command in extended SenseID data
	 */
	ciw = ccw_device_get_ciw(channel->ccwdev, CIW_TYPE_RCD);
	if (!ciw || ciw->cmd == 0)
		return -EOPNOTSUPP;
	rcd_buf = kzalloc(ciw->count, GFP_KERNEL | GFP_DMA);
	if (!rcd_buf)
		return -ENOMEM;

	/* build the RCD channel program on the data channel's CCW */
	channel->ccw.cmd_code = ciw->cmd;
	channel->ccw.cda = (__u32) __pa(rcd_buf);
	channel->ccw.count = ciw->count;
	channel->ccw.flags = CCW_FLAG_SLI;
	channel->state = CH_STATE_RCD;
	spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
	ret = ccw_device_start_timeout(channel->ccwdev, &channel->ccw,
				       QETH_RCD_PARM, LPM_ANYPATH, 0,
				       QETH_RCD_TIMEOUT);
	spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
	/* the irq handler moves the channel to RCD_DONE or DOWN */
	if (!ret)
		wait_event(card->wait_q,
			   (channel->state == CH_STATE_RCD_DONE ||
			    channel->state == CH_STATE_DOWN));
	if (channel->state == CH_STATE_DOWN)
		ret = -EIO;
	else
		channel->state = CH_STATE_DOWN;
	if (ret) {
		kfree(rcd_buf);
		*buffer = NULL;
		*length = 0;
	} else {
		*length = ciw->count;
		*buffer = rcd_buf;
	}
	return ret;
}
/* Extract addressing information from the RCD buffer @prcd.
 * NOTE(review): byte offsets 30/31/63 are taken on faith from the field
 * names they are stored into (chpid, unit_addr2, cula) — confirm against
 * the RCD layout documentation if they ever need changing.
 */
static void qeth_configure_unitaddr(struct qeth_card *card, char *prcd)
{
	QETH_DBF_TEXT(SETUP, 2, "cfgunit");
	card->info.chpid = prcd[30];
	card->info.unit_addr2 = prcd[31];
	card->info.cula = prcd[63];
	/* EBCDIC "VM" at offset 0x10 flags a z/VM guest-LAN device */
	card->info.guestlan = ((prcd[0x10] == _ascebc['V']) &&
			       (prcd[0x11] == _ascebc['M']));
}
1407 static void qeth_configure_blkt_default(struct qeth_card *card, char *prcd)
1409 QETH_DBF_TEXT(SETUP, 2, "cfgblkt");
1411 if (prcd[74] == 0xF0 && prcd[75] == 0xF0 && prcd[76] == 0xF5) {
1412 card->info.blkt.time_total = 250;
1413 card->info.blkt.inter_packet = 5;
1414 card->info.blkt.inter_packet_jumbo = 15;
1415 } else {
1416 card->info.blkt.time_total = 0;
1417 card->info.blkt.inter_packet = 0;
1418 card->info.blkt.inter_packet_jumbo = 0;
/* Seed the "write" tokens used in the MPC handshake with their fixed
 * initial values; the corresponding *_r tokens are filled in later from
 * the adapter's replies.
 */
static void qeth_init_tokens(struct qeth_card *card)
{
	card->token.issuer_rm_w = 0x00010103UL;
	card->token.cm_filter_w = 0x00010108UL;
	card->token.cm_connection_w = 0x0001010aUL;
	card->token.ulp_filter_w = 0x0001010bUL;
	card->token.ulp_connection_w = 0x0001010dUL;
}
1431 static void qeth_init_func_level(struct qeth_card *card)
1433 switch (card->info.type) {
1434 case QETH_CARD_TYPE_IQD:
1435 if (card->ipato.enabled)
1436 card->info.func_level =
1437 QETH_IDX_FUNC_LEVEL_IQD_ENA_IPAT;
1438 else
1439 card->info.func_level =
1440 QETH_IDX_FUNC_LEVEL_IQD_DIS_IPAT;
1441 break;
1442 case QETH_CARD_TYPE_OSD:
1443 card->info.func_level = QETH_IDX_FUNC_LEVEL_OSD;
1444 break;
1445 default:
1446 break;
/* Start a read on @channel to collect the adapter's answer to an IDX
 * activate request; @idx_reply_cb parses it from irq context.  Waits for
 * exclusive use of the channel (irq_pending handshake), then for the
 * callback to move the channel to CH_STATE_UP.  Returns 0, a ccw start
 * error, -ERESTARTSYS on signal, or -ETIME on timeout.
 */
static int qeth_idx_activate_get_answer(struct qeth_channel *channel,
		void (*idx_reply_cb)(struct qeth_channel *,
			struct qeth_cmd_buffer *))
{
	struct qeth_cmd_buffer *iob;
	unsigned long flags;
	int rc;
	struct qeth_card *card;

	QETH_DBF_TEXT(SETUP, 2, "idxanswr");
	card = CARD_FROM_CDEV(channel->ccwdev);
	iob = qeth_get_buffer(channel);
	iob->callback = idx_reply_cb;
	memcpy(&channel->ccw, READ_CCW, sizeof(struct ccw1));
	channel->ccw.count = QETH_BUFSIZE;
	channel->ccw.cda = (__u32) __pa(iob->data);

	/* claim the channel: wait until no other I/O is pending on it */
	wait_event(card->wait_q,
		   atomic_cmpxchg(&channel->irq_pending, 0, 1) == 0);
	QETH_DBF_TEXT(SETUP, 6, "noirqpnd");
	spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
	rc = ccw_device_start(channel->ccwdev,
			      &channel->ccw, (addr_t) iob, 0, 0);
	spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);

	if (rc) {
		QETH_DBF_MESSAGE(2, "Error2 in activating channel rc=%d\n", rc);
		QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc);
		/* release the channel again and wake any other waiters */
		atomic_set(&channel->irq_pending, 0);
		wake_up(&card->wait_q);
		return rc;
	}
	rc = wait_event_interruptible_timeout(card->wait_q,
			channel->state == CH_STATE_UP, QETH_TIMEOUT);
	if (rc == -ERESTARTSYS)
		return rc;
	if (channel->state != CH_STATE_UP) {
		rc = -ETIME;
		QETH_DBF_TEXT_(SETUP, 2, "3err%d", rc);
		qeth_clear_cmd_buffers(channel);
	} else
		rc = 0;
	return rc;
}
/* Send an IDX ACTIVATE request on @channel (READ or WRITE variant is
 * chosen by comparing against card->write), wait for the state to reach
 * CH_STATE_ACTIVATING, then fetch the adapter's answer via
 * qeth_idx_activate_get_answer().  Returns 0, a ccw start error,
 * -ERESTARTSYS on signal, or -ETIME on activation timeout.
 */
static int qeth_idx_activate_channel(struct qeth_channel *channel,
		void (*idx_reply_cb)(struct qeth_channel *,
			struct qeth_cmd_buffer *))
{
	struct qeth_card *card;
	struct qeth_cmd_buffer *iob;
	unsigned long flags;
	__u16 temp;
	__u8 tmp;
	int rc;
	struct ccw_dev_id temp_devid;

	card = CARD_FROM_CDEV(channel->ccwdev);

	QETH_DBF_TEXT(SETUP, 2, "idxactch");

	iob = qeth_get_buffer(channel);
	iob->callback = idx_reply_cb;
	memcpy(&channel->ccw, WRITE_CCW, sizeof(struct ccw1));
	channel->ccw.count = IDX_ACTIVATE_SIZE;
	channel->ccw.cda = (__u32) __pa(iob->data);
	if (channel == &card->write) {
		memcpy(iob->data, IDX_ACTIVATE_WRITE, IDX_ACTIVATE_SIZE);
		memcpy(QETH_TRANSPORT_HEADER_SEQ_NO(iob->data),
		       &card->seqno.trans_hdr, QETH_SEQ_NO_LENGTH);
		/* only the write channel consumes a transport seqno */
		card->seqno.trans_hdr++;
	} else {
		memcpy(iob->data, IDX_ACTIVATE_READ, IDX_ACTIVATE_SIZE);
		memcpy(QETH_TRANSPORT_HEADER_SEQ_NO(iob->data),
		       &card->seqno.trans_hdr, QETH_SEQ_NO_LENGTH);
	}
	/* port number with the high bit set, as the adapter expects */
	tmp = ((__u8)card->info.portno) | 0x80;
	memcpy(QETH_IDX_ACT_PNO(iob->data), &tmp, 1);
	memcpy(QETH_IDX_ACT_ISSUER_RM_TOKEN(iob->data),
	       &card->token.issuer_rm_w, QETH_MPC_TOKEN_LENGTH);
	memcpy(QETH_IDX_ACT_FUNC_LEVEL(iob->data),
	       &card->info.func_level, sizeof(__u16));
	/* identify the QDIO data device by devno and cula/unit address */
	ccw_device_get_id(CARD_DDEV(card), &temp_devid);
	memcpy(QETH_IDX_ACT_QDIO_DEV_CUA(iob->data), &temp_devid.devno, 2);
	temp = (card->info.cula << 8) + card->info.unit_addr2;
	memcpy(QETH_IDX_ACT_QDIO_DEV_REALADDR(iob->data), &temp, 2);

	/* claim the channel: wait until no other I/O is pending on it */
	wait_event(card->wait_q,
		   atomic_cmpxchg(&channel->irq_pending, 0, 1) == 0);
	QETH_DBF_TEXT(SETUP, 6, "noirqpnd");
	spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
	rc = ccw_device_start(channel->ccwdev,
			      &channel->ccw, (addr_t) iob, 0, 0);
	spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);

	if (rc) {
		QETH_DBF_MESSAGE(2, "Error1 in activating channel. rc=%d\n",
			rc);
		QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
		/* release the channel again and wake any other waiters */
		atomic_set(&channel->irq_pending, 0);
		wake_up(&card->wait_q);
		return rc;
	}
	rc = wait_event_interruptible_timeout(card->wait_q,
			channel->state == CH_STATE_ACTIVATING, QETH_TIMEOUT);
	if (rc == -ERESTARTSYS)
		return rc;
	if (channel->state != CH_STATE_ACTIVATING) {
		dev_warn(&channel->ccwdev->dev, "The qeth device driver"
			" failed to recover an error on the device\n");
		QETH_DBF_MESSAGE(2, "%s IDX activate timed out\n",
			dev_name(&channel->ccwdev->dev));
		QETH_DBF_TEXT_(SETUP, 2, "2err%d", -ETIME);
		qeth_clear_cmd_buffers(channel);
		return -ETIME;
	}
	return qeth_idx_activate_get_answer(channel, idx_reply_cb);
}
1569 static int qeth_peer_func_level(int level)
1571 if ((level & 0xff) == 8)
1572 return (level & 0xff) + 0x400;
1573 if (((level >> 8) & 3) == 1)
1574 return (level & 0xff) + 0x200;
1575 return level;
/* IRQ-context callback for the IDX ACTIVATE reply on the WRITE channel.
 * First invocation (channel still DOWN) only advances the state to
 * ACTIVATING; the real reply is then checked for a positive response and
 * a matching peer function level before the channel is marked UP.
 * The command buffer is always released.
 */
static void qeth_idx_write_cb(struct qeth_channel *channel,
		struct qeth_cmd_buffer *iob)
{
	struct qeth_card *card;
	__u16 temp;

	QETH_DBF_TEXT(SETUP , 2, "idxwrcb");

	if (channel->state == CH_STATE_DOWN) {
		channel->state = CH_STATE_ACTIVATING;
		goto out;
	}
	card = CARD_FROM_CDEV(channel->ccwdev);

	if (!(QETH_IS_IDX_ACT_POS_REPLY(iob->data))) {
		if (QETH_IDX_ACT_CAUSE_CODE(iob->data) == QETH_IDX_ACT_ERR_EXCL)
			dev_err(&card->write.ccwdev->dev,
				"The adapter is used exclusively by another "
				"host\n");
		else
			QETH_DBF_MESSAGE(2, "%s IDX_ACTIVATE on write channel:"
				" negative reply\n",
				dev_name(&card->write.ccwdev->dev));
		goto out;
	}
	/* bit 0x0100 is masked out before comparing function levels */
	memcpy(&temp, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2);
	if ((temp & ~0x0100) != qeth_peer_func_level(card->info.func_level)) {
		QETH_DBF_MESSAGE(2, "%s IDX_ACTIVATE on write channel: "
			"function level mismatch (sent: 0x%x, received: "
			"0x%x)\n", dev_name(&card->write.ccwdev->dev),
			card->info.func_level, temp);
		goto out;
	}
	channel->state = CH_STATE_UP;
out:
	qeth_release_buffer(channel, iob);
}
1616 static void qeth_idx_read_cb(struct qeth_channel *channel,
1617 struct qeth_cmd_buffer *iob)
1619 struct qeth_card *card;
1620 __u16 temp;
1622 QETH_DBF_TEXT(SETUP , 2, "idxrdcb");
1623 if (channel->state == CH_STATE_DOWN) {
1624 channel->state = CH_STATE_ACTIVATING;
1625 goto out;
1628 card = CARD_FROM_CDEV(channel->ccwdev);
1629 if (qeth_check_idx_response(card, iob->data))
1630 goto out;
1632 if (!(QETH_IS_IDX_ACT_POS_REPLY(iob->data))) {
1633 switch (QETH_IDX_ACT_CAUSE_CODE(iob->data)) {
1634 case QETH_IDX_ACT_ERR_EXCL:
1635 dev_err(&card->write.ccwdev->dev,
1636 "The adapter is used exclusively by another "
1637 "host\n");
1638 break;
1639 case QETH_IDX_ACT_ERR_AUTH:
1640 dev_err(&card->read.ccwdev->dev,
1641 "Setting the device online failed because of "
1642 "insufficient LPAR authorization\n");
1643 break;
1644 default:
1645 QETH_DBF_MESSAGE(2, "%s IDX_ACTIVATE on read channel:"
1646 " negative reply\n",
1647 dev_name(&card->read.ccwdev->dev));
1649 goto out;
1653 * * temporary fix for microcode bug
1654 * * to revert it,replace OR by AND
1655 * */
1656 if ((!QETH_IDX_NO_PORTNAME_REQUIRED(iob->data)) ||
1657 (card->info.type == QETH_CARD_TYPE_OSD))
1658 card->info.portname_required = 1;
1660 memcpy(&temp, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2);
1661 if (temp != qeth_peer_func_level(card->info.func_level)) {
1662 QETH_DBF_MESSAGE(2, "%s IDX_ACTIVATE on read channel: function "
1663 "level mismatch (sent: 0x%x, received: 0x%x)\n",
1664 dev_name(&card->read.ccwdev->dev),
1665 card->info.func_level, temp);
1666 goto out;
1668 memcpy(&card->token.issuer_rm_r,
1669 QETH_IDX_ACT_ISSUER_RM_TOKEN(iob->data),
1670 QETH_MPC_TOKEN_LENGTH);
1671 memcpy(&card->info.mcl_level[0],
1672 QETH_IDX_REPLY_LEVEL(iob->data), QETH_MCL_LENGTH);
1673 channel->state = CH_STATE_UP;
1674 out:
1675 qeth_release_buffer(channel, iob);
/* Prepare @iob for transmission on the write channel: set up the CCW for
 * @len bytes, stamp the transport and PDU sequence numbers (advancing
 * both counters as a side effect) plus the last acknowledged PDU seqno,
 * and dump the buffer head to the control-data trace.
 */
void qeth_prepare_control_data(struct qeth_card *card, int len,
		struct qeth_cmd_buffer *iob)
{
	qeth_setup_ccw(&card->write, iob->data, len);
	/* buffer is recycled automatically once the write completes */
	iob->callback = qeth_release_buffer;

	memcpy(QETH_TRANSPORT_HEADER_SEQ_NO(iob->data),
	       &card->seqno.trans_hdr, QETH_SEQ_NO_LENGTH);
	card->seqno.trans_hdr++;
	memcpy(QETH_PDU_HEADER_SEQ_NO(iob->data),
	       &card->seqno.pdu_hdr, QETH_SEQ_NO_LENGTH);
	card->seqno.pdu_hdr++;
	memcpy(QETH_PDU_HEADER_ACK_SEQ_NO(iob->data),
	       &card->seqno.pdu_hdr_ack, QETH_SEQ_NO_LENGTH);
	QETH_DBF_HEX(CTRL, 2, iob->data, QETH_DBF_CTRL_LEN);
}
EXPORT_SYMBOL_GPL(qeth_prepare_control_data);
/* Send a control command of @len bytes in @iob on the write channel and
 * wait for the reply, which is delivered to @reply_cb with @reply_param.
 * A qeth_reply is queued on cmd_waiter_list so the read-channel irq path
 * can match the answer by sequence number.  IPv4 SETIP is the only long
 * running command and may sleep on a waitqueue; all other commands are
 * polled with cpu_relax() until the reply arrives or the timeout hits.
 * Returns the reply's rc, a ccw start error, -ENOMEM or -ETIME.
 */
int qeth_send_control_data(struct qeth_card *card, int len,
		struct qeth_cmd_buffer *iob,
		int (*reply_cb)(struct qeth_card *, struct qeth_reply *,
			unsigned long),
		void *reply_param)
{
	int rc;
	unsigned long flags;
	struct qeth_reply *reply = NULL;
	unsigned long timeout, event_timeout;
	struct qeth_ipa_cmd *cmd;

	QETH_DBF_TEXT(TRACE, 2, "sendctl");

	reply = qeth_alloc_reply(card);
	if (!reply) {
		return -ENOMEM;
	}
	reply->callback = reply_cb;
	reply->param = reply_param;
	/* before the card is up, commands use the fixed IDX seqno */
	if (card->state == CARD_STATE_DOWN)
		reply->seqno = QETH_IDX_COMMAND_SEQNO;
	else
		reply->seqno = card->seqno.ipa++;
	init_waitqueue_head(&reply->wait_q);
	spin_lock_irqsave(&card->lock, flags);
	list_add_tail(&reply->list, &card->cmd_waiter_list);
	spin_unlock_irqrestore(&card->lock, flags);
	QETH_DBF_HEX(CTRL, 2, iob->data, QETH_DBF_CTRL_LEN);

	/* busy-wait until the write channel is free, then claim it */
	while (atomic_cmpxchg(&card->write.irq_pending, 0, 1)) ;
	qeth_prepare_control_data(card, len, iob);

	if (IS_IPA(iob->data))
		event_timeout = QETH_IPA_TIMEOUT;
	else
		event_timeout = QETH_TIMEOUT;
	timeout = jiffies + event_timeout;

	QETH_DBF_TEXT(TRACE, 6, "noirqpnd");
	spin_lock_irqsave(get_ccwdev_lock(card->write.ccwdev), flags);
	rc = ccw_device_start(card->write.ccwdev, &card->write.ccw,
			      (addr_t) iob, 0, 0);
	spin_unlock_irqrestore(get_ccwdev_lock(card->write.ccwdev), flags);
	if (rc) {
		QETH_DBF_MESSAGE(2, "%s qeth_send_control_data: "
			"ccw_device_start rc = %i\n",
			dev_name(&card->write.ccwdev->dev), rc);
		QETH_DBF_TEXT_(TRACE, 2, " err%d", rc);
		/* unhook the reply and release buffer + channel again */
		spin_lock_irqsave(&card->lock, flags);
		list_del_init(&reply->list);
		qeth_put_reply(reply);
		spin_unlock_irqrestore(&card->lock, flags);
		qeth_release_buffer(iob->channel, iob);
		atomic_set(&card->write.irq_pending, 0);
		wake_up(&card->wait_q);
		return rc;
	}

	/* we have only one long running ipassist, since we can ensure
	   process context of this command we can sleep */
	cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
	if ((cmd->hdr.command == IPA_CMD_SETIP) &&
	    (cmd->hdr.prot_version == QETH_PROT_IPV4)) {
		if (!wait_event_timeout(reply->wait_q,
		    atomic_read(&reply->received), event_timeout))
			goto time_err;
	} else {
		while (!atomic_read(&reply->received)) {
			if (time_after(jiffies, timeout))
				goto time_err;
			cpu_relax();
		}
	}

	rc = reply->rc;
	qeth_put_reply(reply);
	return rc;

time_err:
	spin_lock_irqsave(&reply->card->lock, flags);
	list_del_init(&reply->list);
	spin_unlock_irqrestore(&reply->card->lock, flags);
	reply->rc = -ETIME;
	atomic_inc(&reply->received);
	wake_up(&reply->wait_q);
	rc = reply->rc;
	qeth_put_reply(reply);
	return rc;
}
EXPORT_SYMBOL_GPL(qeth_send_control_data);
1788 static int qeth_cm_enable_cb(struct qeth_card *card, struct qeth_reply *reply,
1789 unsigned long data)
1791 struct qeth_cmd_buffer *iob;
1793 QETH_DBF_TEXT(SETUP, 2, "cmenblcb");
1795 iob = (struct qeth_cmd_buffer *) data;
1796 memcpy(&card->token.cm_filter_r,
1797 QETH_CM_ENABLE_RESP_FILTER_TOKEN(iob->data),
1798 QETH_MPC_TOKEN_LENGTH);
1799 QETH_DBF_TEXT_(SETUP, 2, " rc%d", iob->rc);
1800 return 0;
1803 static int qeth_cm_enable(struct qeth_card *card)
1805 int rc;
1806 struct qeth_cmd_buffer *iob;
1808 QETH_DBF_TEXT(SETUP, 2, "cmenable");
1810 iob = qeth_wait_for_buffer(&card->write);
1811 memcpy(iob->data, CM_ENABLE, CM_ENABLE_SIZE);
1812 memcpy(QETH_CM_ENABLE_ISSUER_RM_TOKEN(iob->data),
1813 &card->token.issuer_rm_r, QETH_MPC_TOKEN_LENGTH);
1814 memcpy(QETH_CM_ENABLE_FILTER_TOKEN(iob->data),
1815 &card->token.cm_filter_w, QETH_MPC_TOKEN_LENGTH);
1817 rc = qeth_send_control_data(card, CM_ENABLE_SIZE, iob,
1818 qeth_cm_enable_cb, NULL);
1819 return rc;
1822 static int qeth_cm_setup_cb(struct qeth_card *card, struct qeth_reply *reply,
1823 unsigned long data)
1826 struct qeth_cmd_buffer *iob;
1828 QETH_DBF_TEXT(SETUP, 2, "cmsetpcb");
1830 iob = (struct qeth_cmd_buffer *) data;
1831 memcpy(&card->token.cm_connection_r,
1832 QETH_CM_SETUP_RESP_DEST_ADDR(iob->data),
1833 QETH_MPC_TOKEN_LENGTH);
1834 QETH_DBF_TEXT_(SETUP, 2, " rc%d", iob->rc);
1835 return 0;
1838 static int qeth_cm_setup(struct qeth_card *card)
1840 int rc;
1841 struct qeth_cmd_buffer *iob;
1843 QETH_DBF_TEXT(SETUP, 2, "cmsetup");
1845 iob = qeth_wait_for_buffer(&card->write);
1846 memcpy(iob->data, CM_SETUP, CM_SETUP_SIZE);
1847 memcpy(QETH_CM_SETUP_DEST_ADDR(iob->data),
1848 &card->token.issuer_rm_r, QETH_MPC_TOKEN_LENGTH);
1849 memcpy(QETH_CM_SETUP_CONNECTION_TOKEN(iob->data),
1850 &card->token.cm_connection_w, QETH_MPC_TOKEN_LENGTH);
1851 memcpy(QETH_CM_SETUP_FILTER_TOKEN(iob->data),
1852 &card->token.cm_filter_r, QETH_MPC_TOKEN_LENGTH);
1853 rc = qeth_send_control_data(card, CM_SETUP_SIZE, iob,
1854 qeth_cm_setup_cb, NULL);
1855 return rc;
1859 static inline int qeth_get_initial_mtu_for_card(struct qeth_card *card)
1861 switch (card->info.type) {
1862 case QETH_CARD_TYPE_UNKNOWN:
1863 return 1500;
1864 case QETH_CARD_TYPE_IQD:
1865 return card->info.max_mtu;
1866 case QETH_CARD_TYPE_OSD:
1867 switch (card->info.link_type) {
1868 case QETH_LINK_TYPE_HSTR:
1869 case QETH_LINK_TYPE_LANE_TR:
1870 return 2000;
1871 default:
1872 return 1492;
1874 case QETH_CARD_TYPE_OSM:
1875 case QETH_CARD_TYPE_OSX:
1876 return 1492;
1877 default:
1878 return 1500;
1882 static inline int qeth_get_max_mtu_for_card(int cardtype)
1884 switch (cardtype) {
1886 case QETH_CARD_TYPE_UNKNOWN:
1887 case QETH_CARD_TYPE_OSD:
1888 case QETH_CARD_TYPE_OSN:
1889 case QETH_CARD_TYPE_OSM:
1890 case QETH_CARD_TYPE_OSX:
1891 return 61440;
1892 case QETH_CARD_TYPE_IQD:
1893 return 57344;
1894 default:
1895 return 1500;
1899 static inline int qeth_get_mtu_out_of_mpc(int cardtype)
1901 switch (cardtype) {
1902 case QETH_CARD_TYPE_IQD:
1903 return 1;
1904 default:
1905 return 0;
/* Map the adapter's frame-size code from the ULP ENABLE reply to the
 * corresponding MTU; 0 signals an unknown/invalid code.
 */
static inline int qeth_get_mtu_outof_framesize(int framesize)
{
	if (framesize == 0x4000)
		return 8192;
	if (framesize == 0x6000)
		return 16384;
	if (framesize == 0xa000)
		return 32768;
	if (framesize == 0xffff)
		return 57344;
	return 0;
}
1925 static inline int qeth_mtu_is_valid(struct qeth_card *card, int mtu)
1927 switch (card->info.type) {
1928 case QETH_CARD_TYPE_OSD:
1929 case QETH_CARD_TYPE_OSM:
1930 case QETH_CARD_TYPE_OSX:
1931 return ((mtu >= 576) && (mtu <= 61440));
1932 case QETH_CARD_TYPE_IQD:
1933 return ((mtu >= 576) &&
1934 (mtu <= card->info.max_mtu + 4096 - 32));
1935 case QETH_CARD_TYPE_OSN:
1936 case QETH_CARD_TYPE_UNKNOWN:
1937 default:
1938 return 1;
1942 static int qeth_ulp_enable_cb(struct qeth_card *card, struct qeth_reply *reply,
1943 unsigned long data)
1946 __u16 mtu, framesize;
1947 __u16 len;
1948 __u8 link_type;
1949 struct qeth_cmd_buffer *iob;
1951 QETH_DBF_TEXT(SETUP, 2, "ulpenacb");
1953 iob = (struct qeth_cmd_buffer *) data;
1954 memcpy(&card->token.ulp_filter_r,
1955 QETH_ULP_ENABLE_RESP_FILTER_TOKEN(iob->data),
1956 QETH_MPC_TOKEN_LENGTH);
1957 if (qeth_get_mtu_out_of_mpc(card->info.type)) {
1958 memcpy(&framesize, QETH_ULP_ENABLE_RESP_MAX_MTU(iob->data), 2);
1959 mtu = qeth_get_mtu_outof_framesize(framesize);
1960 if (!mtu) {
1961 iob->rc = -EINVAL;
1962 QETH_DBF_TEXT_(SETUP, 2, " rc%d", iob->rc);
1963 return 0;
1965 card->info.max_mtu = mtu;
1966 card->info.initial_mtu = mtu;
1967 card->qdio.in_buf_size = mtu + 2 * PAGE_SIZE;
1968 } else {
1969 card->info.initial_mtu = qeth_get_initial_mtu_for_card(card);
1970 card->info.max_mtu = qeth_get_max_mtu_for_card(card->info.type);
1971 card->qdio.in_buf_size = QETH_IN_BUF_SIZE_DEFAULT;
1974 memcpy(&len, QETH_ULP_ENABLE_RESP_DIFINFO_LEN(iob->data), 2);
1975 if (len >= QETH_MPC_DIFINFO_LEN_INDICATES_LINK_TYPE) {
1976 memcpy(&link_type,
1977 QETH_ULP_ENABLE_RESP_LINK_TYPE(iob->data), 1);
1978 card->info.link_type = link_type;
1979 } else
1980 card->info.link_type = 0;
1981 QETH_DBF_TEXT_(SETUP, 2, "link%d", link_type);
1982 QETH_DBF_TEXT_(SETUP, 2, " rc%d", iob->rc);
1983 return 0;
1986 static int qeth_ulp_enable(struct qeth_card *card)
1988 int rc;
1989 char prot_type;
1990 struct qeth_cmd_buffer *iob;
1992 /*FIXME: trace view callbacks*/
1993 QETH_DBF_TEXT(SETUP, 2, "ulpenabl");
1995 iob = qeth_wait_for_buffer(&card->write);
1996 memcpy(iob->data, ULP_ENABLE, ULP_ENABLE_SIZE);
1998 *(QETH_ULP_ENABLE_LINKNUM(iob->data)) =
1999 (__u8) card->info.portno;
2000 if (card->options.layer2)
2001 if (card->info.type == QETH_CARD_TYPE_OSN)
2002 prot_type = QETH_PROT_OSN2;
2003 else
2004 prot_type = QETH_PROT_LAYER2;
2005 else
2006 prot_type = QETH_PROT_TCPIP;
2008 memcpy(QETH_ULP_ENABLE_PROT_TYPE(iob->data), &prot_type, 1);
2009 memcpy(QETH_ULP_ENABLE_DEST_ADDR(iob->data),
2010 &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH);
2011 memcpy(QETH_ULP_ENABLE_FILTER_TOKEN(iob->data),
2012 &card->token.ulp_filter_w, QETH_MPC_TOKEN_LENGTH);
2013 memcpy(QETH_ULP_ENABLE_PORTNAME_AND_LL(iob->data),
2014 card->info.portname, 9);
2015 rc = qeth_send_control_data(card, ULP_ENABLE_SIZE, iob,
2016 qeth_ulp_enable_cb, NULL);
2017 return rc;
2021 static int qeth_ulp_setup_cb(struct qeth_card *card, struct qeth_reply *reply,
2022 unsigned long data)
2024 struct qeth_cmd_buffer *iob;
2025 int rc = 0;
2027 QETH_DBF_TEXT(SETUP, 2, "ulpstpcb");
2029 iob = (struct qeth_cmd_buffer *) data;
2030 memcpy(&card->token.ulp_connection_r,
2031 QETH_ULP_SETUP_RESP_CONNECTION_TOKEN(iob->data),
2032 QETH_MPC_TOKEN_LENGTH);
2033 if (!strncmp("00S", QETH_ULP_SETUP_RESP_CONNECTION_TOKEN(iob->data),
2034 3)) {
2035 QETH_DBF_TEXT(SETUP, 2, "olmlimit");
2036 dev_err(&card->gdev->dev, "A connection could not be "
2037 "established because of an OLM limit\n");
2038 rc = -EMLINK;
2040 QETH_DBF_TEXT_(SETUP, 2, " rc%d", iob->rc);
2041 return rc;
/* Send the ULP SETUP control command, carrying the CM connection token,
 * our ULP tokens, and the QDIO data device's devno and real address
 * (cula/unit address).  Returns the result of qeth_send_control_data().
 */
static int qeth_ulp_setup(struct qeth_card *card)
{
	int rc;
	__u16 temp;
	struct qeth_cmd_buffer *iob;
	struct ccw_dev_id dev_id;

	QETH_DBF_TEXT(SETUP, 2, "ulpsetup");

	iob = qeth_wait_for_buffer(&card->write);
	memcpy(iob->data, ULP_SETUP, ULP_SETUP_SIZE);

	memcpy(QETH_ULP_SETUP_DEST_ADDR(iob->data),
	       &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH);
	memcpy(QETH_ULP_SETUP_CONNECTION_TOKEN(iob->data),
	       &card->token.ulp_connection_w, QETH_MPC_TOKEN_LENGTH);
	memcpy(QETH_ULP_SETUP_FILTER_TOKEN(iob->data),
	       &card->token.ulp_filter_r, QETH_MPC_TOKEN_LENGTH);

	/* identify the QDIO data device */
	ccw_device_get_id(CARD_DDEV(card), &dev_id);
	memcpy(QETH_ULP_SETUP_CUA(iob->data), &dev_id.devno, 2);
	temp = (card->info.cula << 8) + card->info.unit_addr2;
	memcpy(QETH_ULP_SETUP_REAL_DEVADDR(iob->data), &temp, 2);
	rc = qeth_send_control_data(card, ULP_SETUP_SIZE, iob,
				    qeth_ulp_setup_cb, NULL);
	return rc;
}
2072 static int qeth_alloc_qdio_buffers(struct qeth_card *card)
2074 int i, j;
2076 QETH_DBF_TEXT(SETUP, 2, "allcqdbf");
2078 if (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_UNINITIALIZED,
2079 QETH_QDIO_ALLOCATED) != QETH_QDIO_UNINITIALIZED)
2080 return 0;
2082 card->qdio.in_q = kmalloc(sizeof(struct qeth_qdio_q),
2083 GFP_KERNEL);
2084 if (!card->qdio.in_q)
2085 goto out_nomem;
2086 QETH_DBF_TEXT(SETUP, 2, "inq");
2087 QETH_DBF_HEX(SETUP, 2, &card->qdio.in_q, sizeof(void *));
2088 memset(card->qdio.in_q, 0, sizeof(struct qeth_qdio_q));
2089 /* give inbound qeth_qdio_buffers their qdio_buffers */
2090 for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; ++i)
2091 card->qdio.in_q->bufs[i].buffer =
2092 &card->qdio.in_q->qdio_bufs[i];
2093 /* inbound buffer pool */
2094 if (qeth_alloc_buffer_pool(card))
2095 goto out_freeinq;
2096 /* outbound */
2097 card->qdio.out_qs =
2098 kmalloc(card->qdio.no_out_queues *
2099 sizeof(struct qeth_qdio_out_q *), GFP_KERNEL);
2100 if (!card->qdio.out_qs)
2101 goto out_freepool;
2102 for (i = 0; i < card->qdio.no_out_queues; ++i) {
2103 card->qdio.out_qs[i] = kmalloc(sizeof(struct qeth_qdio_out_q),
2104 GFP_KERNEL);
2105 if (!card->qdio.out_qs[i])
2106 goto out_freeoutq;
2107 QETH_DBF_TEXT_(SETUP, 2, "outq %i", i);
2108 QETH_DBF_HEX(SETUP, 2, &card->qdio.out_qs[i], sizeof(void *));
2109 memset(card->qdio.out_qs[i], 0, sizeof(struct qeth_qdio_out_q));
2110 card->qdio.out_qs[i]->queue_no = i;
2111 /* give outbound qeth_qdio_buffers their qdio_buffers */
2112 for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
2113 card->qdio.out_qs[i]->bufs[j].buffer =
2114 &card->qdio.out_qs[i]->qdio_bufs[j];
2115 skb_queue_head_init(&card->qdio.out_qs[i]->bufs[j].
2116 skb_list);
2117 lockdep_set_class(
2118 &card->qdio.out_qs[i]->bufs[j].skb_list.lock,
2119 &qdio_out_skb_queue_key);
2120 INIT_LIST_HEAD(&card->qdio.out_qs[i]->bufs[j].ctx_list);
2123 return 0;
2125 out_freeoutq:
2126 while (i > 0)
2127 kfree(card->qdio.out_qs[--i]);
2128 kfree(card->qdio.out_qs);
2129 card->qdio.out_qs = NULL;
2130 out_freepool:
2131 qeth_free_buffer_pool(card);
2132 out_freeinq:
2133 kfree(card->qdio.in_q);
2134 card->qdio.in_q = NULL;
2135 out_nomem:
2136 atomic_set(&card->qdio.state, QETH_QDIO_UNINITIALIZED);
2137 return -ENOMEM;
/* Fill the QIB parameter field with the EBCDIC tag "PCIT" followed by
 * the three PCI threshold/timer values for this card.
 * NOTE(review): the unsigned-int stores assume the buffer is suitably
 * aligned and uses host (big-endian on s390) byte order — confirm
 * against the QDIO QIB layout before changing.
 */
static void qeth_create_qib_param_field(struct qeth_card *card,
		char *param_field)
{
	param_field[0] = _ascebc['P'];
	param_field[1] = _ascebc['C'];
	param_field[2] = _ascebc['I'];
	param_field[3] = _ascebc['T'];
	*((unsigned int *) (&param_field[4])) = QETH_PCI_THRESHOLD_A(card);
	*((unsigned int *) (&param_field[8])) = QETH_PCI_THRESHOLD_B(card);
	*((unsigned int *) (&param_field[12])) = QETH_PCI_TIMER_VALUE(card);
}
/* Append the EBCDIC tag "BLKT" and the three blocking-threshold timing
 * values (set up by qeth_configure_blkt_default()) to the QIB parameter
 * field, directly after the "PCIT" section written by
 * qeth_create_qib_param_field().
 */
static void qeth_create_qib_param_field_blkt(struct qeth_card *card,
		char *param_field)
{
	param_field[16] = _ascebc['B'];
	param_field[17] = _ascebc['L'];
	param_field[18] = _ascebc['K'];
	param_field[19] = _ascebc['T'];
	*((unsigned int *) (&param_field[20])) = card->info.blkt.time_total;
	*((unsigned int *) (&param_field[24])) = card->info.blkt.inter_packet;
	*((unsigned int *) (&param_field[28])) =
		card->info.blkt.inter_packet_jumbo;
}
/* Activate the established QDIO queues on the data device. */
static int qeth_qdio_activate(struct qeth_card *card)
{
	QETH_DBF_TEXT(SETUP, 3, "qdioact");
	return qdio_activate(CARD_DDEV(card));
}
2172 static int qeth_dm_act(struct qeth_card *card)
2174 int rc;
2175 struct qeth_cmd_buffer *iob;
2177 QETH_DBF_TEXT(SETUP, 2, "dmact");
2179 iob = qeth_wait_for_buffer(&card->write);
2180 memcpy(iob->data, DM_ACT, DM_ACT_SIZE);
2182 memcpy(QETH_DM_ACT_DEST_ADDR(iob->data),
2183 &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH);
2184 memcpy(QETH_DM_ACT_CONNECTION_TOKEN(iob->data),
2185 &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH);
2186 rc = qeth_send_control_data(card, DM_ACT_SIZE, iob, NULL, NULL);
2187 return rc;
/* Run the full MPC bring-up sequence: start the read channel, CM
 * enable/setup, ULP enable/setup, allocate and establish the QDIO
 * queues, activate them and finally DM ACT.  Any failure tears the QDIO
 * connection down again (with halt except for IQD) and returns the rc.
 */
static int qeth_mpc_initialize(struct qeth_card *card)
{
	int rc;

	QETH_DBF_TEXT(SETUP, 2, "mpcinit");

	rc = qeth_issue_next_read(card);
	if (rc) {
		QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
		return rc;
	}
	rc = qeth_cm_enable(card);
	if (rc) {
		QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc);
		goto out_qdio;
	}
	rc = qeth_cm_setup(card);
	if (rc) {
		QETH_DBF_TEXT_(SETUP, 2, "3err%d", rc);
		goto out_qdio;
	}
	rc = qeth_ulp_enable(card);
	if (rc) {
		QETH_DBF_TEXT_(SETUP, 2, "4err%d", rc);
		goto out_qdio;
	}
	rc = qeth_ulp_setup(card);
	if (rc) {
		QETH_DBF_TEXT_(SETUP, 2, "5err%d", rc);
		goto out_qdio;
	}
	rc = qeth_alloc_qdio_buffers(card);
	if (rc) {
		/* NOTE(review): trace id "5err" is duplicated with the
		 * ulp_setup failure above; 6..8 are taken below, so the
		 * two cases are only distinguishable by rc value. */
		QETH_DBF_TEXT_(SETUP, 2, "5err%d", rc);
		goto out_qdio;
	}
	rc = qeth_qdio_establish(card);
	if (rc) {
		QETH_DBF_TEXT_(SETUP, 2, "6err%d", rc);
		qeth_free_qdio_buffers(card);
		goto out_qdio;
	}
	rc = qeth_qdio_activate(card);
	if (rc) {
		QETH_DBF_TEXT_(SETUP, 2, "7err%d", rc);
		goto out_qdio;
	}
	rc = qeth_dm_act(card);
	if (rc) {
		QETH_DBF_TEXT_(SETUP, 2, "8err%d", rc);
		goto out_qdio;
	}

	return 0;
out_qdio:
	qeth_qdio_clear_card(card, card->info.type != QETH_CARD_TYPE_IQD);
	return rc;
}
/*
 * Log the card type and microcode level together with the port name.
 * The port name bytes are EBCDIC and taken from portname[1] onwards
 * (portname[0] presumably holds a length/flag byte -- NOTE(review):
 * confirm against the callers that fill card->info.portname); they are
 * converted to ASCII before printing.
 */
static void qeth_print_status_with_portname(struct qeth_card *card)
{
	char dbf_text[15];
	int i;

	/* NOTE(review): relies on portname+1 being NUL-terminated within
	 * 14 bytes; bytes past an early NUL are converted uninitialized */
	sprintf(dbf_text, "%s", card->info.portname + 1);
	for (i = 0; i < 8; i++)
		dbf_text[i] =
			(char) _ebcasc[(__u8) dbf_text[i]];
	dbf_text[8] = 0;
	dev_info(&card->gdev->dev, "Device is a%s card%s%s%s\n"
		"with link type %s (portname: %s)\n",
		qeth_get_cardname(card),
		(card->info.mcl_level[0]) ? " (level: " : "",
		(card->info.mcl_level[0]) ? card->info.mcl_level : "",
		(card->info.mcl_level[0]) ? ")" : "",
		qeth_get_cardname_short(card),
		dbf_text);
}
2270 static void qeth_print_status_no_portname(struct qeth_card *card)
2272 if (card->info.portname[0])
2273 dev_info(&card->gdev->dev, "Device is a%s "
2274 "card%s%s%s\nwith link type %s "
2275 "(no portname needed by interface).\n",
2276 qeth_get_cardname(card),
2277 (card->info.mcl_level[0]) ? " (level: " : "",
2278 (card->info.mcl_level[0]) ? card->info.mcl_level : "",
2279 (card->info.mcl_level[0]) ? ")" : "",
2280 qeth_get_cardname_short(card));
2281 else
2282 dev_info(&card->gdev->dev, "Device is a%s "
2283 "card%s%s%s\nwith link type %s.\n",
2284 qeth_get_cardname(card),
2285 (card->info.mcl_level[0]) ? " (level: " : "",
2286 (card->info.mcl_level[0]) ? card->info.mcl_level : "",
2287 (card->info.mcl_level[0]) ? ")" : "",
2288 qeth_get_cardname_short(card));
/*
 * Emit the "Device is a ..." message for a card.  First normalizes
 * card->info.mcl_level (microcode level) in place depending on the card
 * type, then delegates to the with-/without-portname printer.
 */
void qeth_print_status_message(struct qeth_card *card)
{
	switch (card->info.type) {
	case QETH_CARD_TYPE_OSD:
	case QETH_CARD_TYPE_OSM:
	case QETH_CARD_TYPE_OSX:
		/* VM will use a non-zero first character
		 * to indicate a HiperSockets like reporting
		 * of the level OSA sets the first character to zero
		 * */
		if (!card->info.mcl_level[0]) {
			/* safe: the byte arguments are evaluated before
			 * sprintf overwrites the buffer */
			sprintf(card->info.mcl_level, "%02x%02x",
				card->info.mcl_level[2],
				card->info.mcl_level[3]);

			card->info.mcl_level[QETH_MCL_LENGTH] = 0;
			break;
		}
		/* fallthrough */
	case QETH_CARD_TYPE_IQD:
		if ((card->info.guestlan) ||
		    (card->info.mcl_level[0] & 0x80)) {
			/* convert the four EBCDIC level bytes to ASCII */
			card->info.mcl_level[0] = (char) _ebcasc[(__u8)
				card->info.mcl_level[0]];
			card->info.mcl_level[1] = (char) _ebcasc[(__u8)
				card->info.mcl_level[1]];
			card->info.mcl_level[2] = (char) _ebcasc[(__u8)
				card->info.mcl_level[2]];
			card->info.mcl_level[3] = (char) _ebcasc[(__u8)
				card->info.mcl_level[3]];
			card->info.mcl_level[QETH_MCL_LENGTH] = 0;
		}
		break;
	default:
		memset(&card->info.mcl_level[0], 0, QETH_MCL_LENGTH + 1);
	}
	if (card->info.portname_required)
		qeth_print_status_with_portname(card);
	else
		qeth_print_status_no_portname(card);
}
EXPORT_SYMBOL_GPL(qeth_print_status_message);
2334 static void qeth_initialize_working_pool_list(struct qeth_card *card)
2336 struct qeth_buffer_pool_entry *entry;
2338 QETH_DBF_TEXT(TRACE, 5, "inwrklst");
2340 list_for_each_entry(entry,
2341 &card->qdio.init_pool.entry_list, init_list) {
2342 qeth_put_buffer_pool_entry(card, entry);
/*
 * Pick a buffer pool entry whose pages are no longer referenced by the
 * stack (page_count == 1 for every element) and unlink it from the
 * working pool.  If no fully free entry exists, take the first entry
 * and replace every still-referenced page with a freshly allocated one.
 *
 * Returns the unlinked entry, or NULL if the pool is empty or a
 * replacement page could not be allocated (GFP_ATOMIC).
 */
static inline struct qeth_buffer_pool_entry *qeth_find_free_buffer_pool_entry(
		struct qeth_card *card)
{
	struct list_head *plh;
	struct qeth_buffer_pool_entry *entry;
	int i, free;
	struct page *page;

	if (list_empty(&card->qdio.in_buf_pool.entry_list))
		return NULL;

	/* first pass: look for an entry with all pages unreferenced */
	list_for_each(plh, &card->qdio.in_buf_pool.entry_list) {
		entry = list_entry(plh, struct qeth_buffer_pool_entry, list);
		free = 1;
		for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) {
			/* count > 1 means the page is still held elsewhere
			 * (e.g. by an skb handed to the stack) */
			if (page_count(virt_to_page(entry->elements[i])) > 1) {
				free = 0;
				break;
			}
		}
		if (free) {
			list_del_init(&entry->list);
			return entry;
		}
	}

	/* no free buffer in pool so take first one and swap pages */
	entry = list_entry(card->qdio.in_buf_pool.entry_list.next,
			struct qeth_buffer_pool_entry, list);
	for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) {
		if (page_count(virt_to_page(entry->elements[i])) > 1) {
			page = alloc_page(GFP_ATOMIC);
			if (!page) {
				/* NOTE(review): pages already swapped in this
				 * loop stay swapped; entry remains in pool */
				return NULL;
			} else {
				/* drop our reference; the old page lives on
				 * until its other holder releases it */
				free_page((unsigned long)entry->elements[i]);
				entry->elements[i] = page_address(page);
				if (card->options.performance_stats)
					card->perf_stats.sg_alloc_page_rx++;
			}
		}
	}
	list_del_init(&entry->list);
	return entry;
}
2392 static int qeth_init_input_buffer(struct qeth_card *card,
2393 struct qeth_qdio_buffer *buf)
2395 struct qeth_buffer_pool_entry *pool_entry;
2396 int i;
2398 pool_entry = qeth_find_free_buffer_pool_entry(card);
2399 if (!pool_entry)
2400 return 1;
2403 * since the buffer is accessed only from the input_tasklet
2404 * there shouldn't be a need to synchronize; also, since we use
2405 * the QETH_IN_BUF_REQUEUE_THRESHOLD we should never run out off
2406 * buffers
2409 buf->pool_entry = pool_entry;
2410 for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) {
2411 buf->buffer->element[i].length = PAGE_SIZE;
2412 buf->buffer->element[i].addr = pool_entry->elements[i];
2413 if (i == QETH_MAX_BUFFER_ELEMENTS(card) - 1)
2414 buf->buffer->element[i].flags = SBAL_FLAGS_LAST_ENTRY;
2415 else
2416 buf->buffer->element[i].flags = 0;
2418 return 0;
/*
 * Initialize the inbound and outbound QDIO queues after establishment:
 * zero the SBALs, fill the inbound queue from the buffer pool, hand the
 * filled buffers to the hardware via do_QDIO, and reset the state of
 * every outbound queue.
 *
 * Returns 0 on success or the do_QDIO error code.
 */
int qeth_init_qdio_queues(struct qeth_card *card)
{
	int i, j;
	int rc;

	QETH_DBF_TEXT(SETUP, 2, "initqdqs");

	/* inbound queue */
	memset(card->qdio.in_q->qdio_bufs, 0,
	       QDIO_MAX_BUFFERS_PER_Q * sizeof(struct qdio_buffer));
	qeth_initialize_working_pool_list(card);
	/*give only as many buffers to hardware as we have buffer pool entries*/
	for (i = 0; i < card->qdio.in_buf_pool.buf_count - 1; ++i)
		qeth_init_input_buffer(card, &card->qdio.in_q->bufs[i]);
	card->qdio.in_q->next_buf_to_init =
		card->qdio.in_buf_pool.buf_count - 1;
	rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0, 0,
		     card->qdio.in_buf_pool.buf_count - 1);
	if (rc) {
		QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
		return rc;
	}
	/* outbound queue */
	for (i = 0; i < card->qdio.no_out_queues; ++i) {
		memset(card->qdio.out_qs[i]->qdio_bufs, 0,
		       QDIO_MAX_BUFFERS_PER_Q * sizeof(struct qdio_buffer));
		for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
			qeth_clear_output_buffer(card->qdio.out_qs[i],
					&card->qdio.out_qs[i]->bufs[j]);
		}
		card->qdio.out_qs[i]->card = card;
		card->qdio.out_qs[i]->next_buf_to_fill = 0;
		card->qdio.out_qs[i]->do_pack = 0;
		atomic_set(&card->qdio.out_qs[i]->used_buffers, 0);
		atomic_set(&card->qdio.out_qs[i]->set_pci_flags_count, 0);
		atomic_set(&card->qdio.out_qs[i]->state,
			   QETH_OUT_Q_UNLOCKED);
	}
	return 0;
}
EXPORT_SYMBOL_GPL(qeth_init_qdio_queues);
2463 static inline __u8 qeth_get_ipa_adp_type(enum qeth_link_types link_type)
2465 switch (link_type) {
2466 case QETH_LINK_TYPE_HSTR:
2467 return 2;
2468 default:
2469 return 1;
2473 static void qeth_fill_ipacmd_header(struct qeth_card *card,
2474 struct qeth_ipa_cmd *cmd, __u8 command,
2475 enum qeth_prot_versions prot)
2477 memset(cmd, 0, sizeof(struct qeth_ipa_cmd));
2478 cmd->hdr.command = command;
2479 cmd->hdr.initiator = IPA_CMD_INITIATOR_HOST;
2480 cmd->hdr.seqno = card->seqno.ipa;
2481 cmd->hdr.adapter_type = qeth_get_ipa_adp_type(card->info.link_type);
2482 cmd->hdr.rel_adapter_no = (__u8) card->info.portno;
2483 if (card->options.layer2)
2484 cmd->hdr.prim_version_no = 2;
2485 else
2486 cmd->hdr.prim_version_no = 1;
2487 cmd->hdr.param_count = 1;
2488 cmd->hdr.prot_version = prot;
2489 cmd->hdr.ipa_supported = 0;
2490 cmd->hdr.ipa_enabled = 0;
2493 struct qeth_cmd_buffer *qeth_get_ipacmd_buffer(struct qeth_card *card,
2494 enum qeth_ipa_cmds ipacmd, enum qeth_prot_versions prot)
2496 struct qeth_cmd_buffer *iob;
2497 struct qeth_ipa_cmd *cmd;
2499 iob = qeth_wait_for_buffer(&card->write);
2500 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
2501 qeth_fill_ipacmd_header(card, cmd, ipacmd, prot);
2503 return iob;
2505 EXPORT_SYMBOL_GPL(qeth_get_ipacmd_buffer);
/*
 * Prepend the IPA PDU header to a command buffer and patch in the
 * protocol type byte and the ULP connection token at their fixed
 * offsets inside the header template.
 */
void qeth_prepare_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
		char prot_type)
{
	memcpy(iob->data, IPA_PDU_HEADER, IPA_PDU_HEADER_SIZE);
	memcpy(QETH_IPA_CMD_PROT_TYPE(iob->data), &prot_type, 1);
	memcpy(QETH_IPA_CMD_DEST_ADDR(iob->data),
	       &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH);
}
EXPORT_SYMBOL_GPL(qeth_prepare_ipa_cmd);
2517 int qeth_send_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
2518 int (*reply_cb)(struct qeth_card *, struct qeth_reply*,
2519 unsigned long),
2520 void *reply_param)
2522 int rc;
2523 char prot_type;
2525 QETH_DBF_TEXT(TRACE, 4, "sendipa");
2527 if (card->options.layer2)
2528 if (card->info.type == QETH_CARD_TYPE_OSN)
2529 prot_type = QETH_PROT_OSN2;
2530 else
2531 prot_type = QETH_PROT_LAYER2;
2532 else
2533 prot_type = QETH_PROT_TCPIP;
2534 qeth_prepare_ipa_cmd(card, iob, prot_type);
2535 rc = qeth_send_control_data(card, IPA_CMD_LENGTH,
2536 iob, reply_cb, reply_param);
2537 return rc;
2539 EXPORT_SYMBOL_GPL(qeth_send_ipa_cmd);
2541 static int qeth_send_startstoplan(struct qeth_card *card,
2542 enum qeth_ipa_cmds ipacmd, enum qeth_prot_versions prot)
2544 int rc;
2545 struct qeth_cmd_buffer *iob;
2547 iob = qeth_get_ipacmd_buffer(card, ipacmd, prot);
2548 rc = qeth_send_ipa_cmd(card, iob, NULL, NULL);
2550 return rc;
2553 int qeth_send_startlan(struct qeth_card *card)
2555 int rc;
2557 QETH_DBF_TEXT(SETUP, 2, "strtlan");
2559 rc = qeth_send_startstoplan(card, IPA_CMD_STARTLAN, 0);
2560 return rc;
2562 EXPORT_SYMBOL_GPL(qeth_send_startlan);
2564 int qeth_send_stoplan(struct qeth_card *card)
2566 int rc = 0;
2569 * TODO: according to the IPA format document page 14,
2570 * TCP/IP (we!) never issue a STOPLAN
2571 * is this right ?!?
2573 QETH_DBF_TEXT(SETUP, 2, "stoplan");
2575 rc = qeth_send_startstoplan(card, IPA_CMD_STOPLAN, 0);
2576 return rc;
2578 EXPORT_SYMBOL_GPL(qeth_send_stoplan);
2580 int qeth_default_setadapterparms_cb(struct qeth_card *card,
2581 struct qeth_reply *reply, unsigned long data)
2583 struct qeth_ipa_cmd *cmd;
2585 QETH_DBF_TEXT(TRACE, 4, "defadpcb");
2587 cmd = (struct qeth_ipa_cmd *) data;
2588 if (cmd->hdr.return_code == 0)
2589 cmd->hdr.return_code =
2590 cmd->data.setadapterparms.hdr.return_code;
2591 return 0;
2593 EXPORT_SYMBOL_GPL(qeth_default_setadapterparms_cb);
2595 static int qeth_query_setadapterparms_cb(struct qeth_card *card,
2596 struct qeth_reply *reply, unsigned long data)
2598 struct qeth_ipa_cmd *cmd;
2600 QETH_DBF_TEXT(TRACE, 3, "quyadpcb");
2602 cmd = (struct qeth_ipa_cmd *) data;
2603 if (cmd->data.setadapterparms.data.query_cmds_supp.lan_type & 0x7f) {
2604 card->info.link_type =
2605 cmd->data.setadapterparms.data.query_cmds_supp.lan_type;
2606 QETH_DBF_TEXT_(SETUP, 2, "lnk %d", card->info.link_type);
2608 card->options.adp.supported_funcs =
2609 cmd->data.setadapterparms.data.query_cmds_supp.supported_cmds;
2610 return qeth_default_setadapterparms_cb(card, reply, (unsigned long)cmd);
2613 struct qeth_cmd_buffer *qeth_get_adapter_cmd(struct qeth_card *card,
2614 __u32 command, __u32 cmdlen)
2616 struct qeth_cmd_buffer *iob;
2617 struct qeth_ipa_cmd *cmd;
2619 iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETADAPTERPARMS,
2620 QETH_PROT_IPV4);
2621 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
2622 cmd->data.setadapterparms.hdr.cmdlength = cmdlen;
2623 cmd->data.setadapterparms.hdr.command_code = command;
2624 cmd->data.setadapterparms.hdr.used_total = 1;
2625 cmd->data.setadapterparms.hdr.seq_no = 1;
2627 return iob;
2629 EXPORT_SYMBOL_GPL(qeth_get_adapter_cmd);
2631 int qeth_query_setadapterparms(struct qeth_card *card)
2633 int rc;
2634 struct qeth_cmd_buffer *iob;
2636 QETH_DBF_TEXT(TRACE, 3, "queryadp");
2637 iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_COMMANDS_SUPPORTED,
2638 sizeof(struct qeth_ipacmd_setadpparms));
2639 rc = qeth_send_ipa_cmd(card, iob, qeth_query_setadapterparms_cb, NULL);
2640 return rc;
2642 EXPORT_SYMBOL_GPL(qeth_query_setadapterparms);
/*
 * Evaluate a QDIO completion error for a buffer and log the SBAL flag
 * bytes 14/15 to the error trace.  Returns 0 if there was no error or
 * the error only means a dropped inbound packet, 1 for any other error.
 */
int qeth_check_qdio_errors(struct qeth_card *card, struct qdio_buffer *buf,
		unsigned int qdio_error, const char *dbftext)
{
	if (qdio_error) {
		QETH_DBF_TEXT(TRACE, 2, dbftext);
		QETH_DBF_TEXT(QERR, 2, dbftext);
		QETH_DBF_TEXT_(QERR, 2, " F15=%02X",
			       buf->element[15].flags & 0xff);
		QETH_DBF_TEXT_(QERR, 2, " F14=%02X",
			       buf->element[14].flags & 0xff);
		QETH_DBF_TEXT_(QERR, 2, " qerr=%X", qdio_error);
		/* 0x12 apparently marks a packet the adapter dropped;
		 * NOTE(review): confirm against the QDIO/OSA flag spec */
		if ((buf->element[15].flags & 0xff) == 0x12) {
			card->stats.rx_dropped++;
			return 0;
		} else
			return 1;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(qeth_check_qdio_errors);
/*
 * Refill and requeue inbound buffers up to (but not including) 'index'.
 *
 * The number of requeueable buffers is computed from the distance
 * between next_buf_to_init and index on the ring; buffers are only
 * handed back to the hardware once the requeue threshold is reached to
 * limit the number of SIGA instructions.  On buffer-pool shortage the
 * card is switched to plain skb allocation for a few rounds via
 * force_alloc_skb.
 */
void qeth_queue_input_buffer(struct qeth_card *card, int index)
{
	struct qeth_qdio_q *queue = card->qdio.in_q;
	int count;
	int i;
	int rc;
	int newcount = 0;

	/* ring distance from next_buf_to_init to index, taking wrap-around
	 * into account, measured against the pool size */
	count = (index < queue->next_buf_to_init)?
		card->qdio.in_buf_pool.buf_count -
		(queue->next_buf_to_init - index) :
		card->qdio.in_buf_pool.buf_count -
		(queue->next_buf_to_init + QDIO_MAX_BUFFERS_PER_Q - index);
	/* only requeue at a certain threshold to avoid SIGAs */
	if (count >= QETH_IN_BUF_REQUEUE_THRESHOLD(card)) {
		for (i = queue->next_buf_to_init;
		     i < queue->next_buf_to_init + count; ++i) {
			if (qeth_init_input_buffer(card,
				&queue->bufs[i % QDIO_MAX_BUFFERS_PER_Q])) {
				break;
			} else {
				newcount++;
			}
		}

		if (newcount < count) {
			/* we are in memory shortage so we switch back to
			   traditional skb allocation and drop packages */
			atomic_set(&card->force_alloc_skb, 3);
			count = newcount;
		} else {
			/* decay the force-alloc counter back towards zero */
			atomic_add_unless(&card->force_alloc_skb, -1, 0);
		}

		/*
		 * according to old code it should be avoided to requeue all
		 * 128 buffers in order to benefit from PCI avoidance.
		 * this function keeps at least one buffer (the buffer at
		 * 'index') un-requeued -> this buffer is the first buffer that
		 * will be requeued the next time
		 */
		if (card->options.performance_stats) {
			card->perf_stats.inbound_do_qdio_cnt++;
			card->perf_stats.inbound_do_qdio_start_time =
				qeth_get_micros();
		}
		rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0,
			     queue->next_buf_to_init, count);
		if (card->options.performance_stats)
			card->perf_stats.inbound_do_qdio_time +=
				qeth_get_micros() -
				card->perf_stats.inbound_do_qdio_start_time;
		if (rc) {
			dev_warn(&card->gdev->dev,
				"QDIO reported an error, rc=%i\n", rc);
			QETH_DBF_TEXT(TRACE, 2, "qinberr");
			QETH_DBF_TEXT_(TRACE, 2, "%s", CARD_BUS_ID(card));
		}
		queue->next_buf_to_init = (queue->next_buf_to_init + count) %
					  QDIO_MAX_BUFFERS_PER_Q;
	}
}
EXPORT_SYMBOL_GPL(qeth_queue_input_buffer);
2729 static int qeth_handle_send_error(struct qeth_card *card,
2730 struct qeth_qdio_out_buffer *buffer, unsigned int qdio_err)
2732 int sbalf15 = buffer->buffer->element[15].flags & 0xff;
2734 QETH_DBF_TEXT(TRACE, 6, "hdsnderr");
2735 if (card->info.type == QETH_CARD_TYPE_IQD) {
2736 if (sbalf15 == 0) {
2737 qdio_err = 0;
2738 } else {
2739 qdio_err = 1;
2742 qeth_check_qdio_errors(card, buffer->buffer, qdio_err, "qouterr");
2744 if (!qdio_err)
2745 return QETH_SEND_ERROR_NONE;
2747 if ((sbalf15 >= 15) && (sbalf15 <= 31))
2748 return QETH_SEND_ERROR_RETRY;
2750 QETH_DBF_TEXT(TRACE, 1, "lnkfail");
2751 QETH_DBF_TEXT_(TRACE, 1, "%s", CARD_BUS_ID(card));
2752 QETH_DBF_TEXT_(TRACE, 1, "%04x %02x",
2753 (u16)qdio_err, (u8)sbalf15);
2754 return QETH_SEND_ERROR_LINK_FAILURE;
2758 * Switched to packing state if the number of used buffers on a queue
2759 * reaches a certain limit.
2761 static void qeth_switch_to_packing_if_needed(struct qeth_qdio_out_q *queue)
2763 if (!queue->do_pack) {
2764 if (atomic_read(&queue->used_buffers)
2765 >= QETH_HIGH_WATERMARK_PACK){
2766 /* switch non-PACKING -> PACKING */
2767 QETH_DBF_TEXT(TRACE, 6, "np->pack");
2768 if (queue->card->options.performance_stats)
2769 queue->card->perf_stats.sc_dp_p++;
2770 queue->do_pack = 1;
/*
 * Switches from packing to non-packing mode. If there is a packing
 * buffer on the queue this buffer will be prepared to be flushed.
 * In that case 1 is returned to inform the caller. If no buffer
 * has to be flushed, zero is returned.
 */
static int qeth_switch_to_nonpacking_if_needed(struct qeth_qdio_out_q *queue)
{
	struct qeth_qdio_out_buffer *buffer;
	int flush_count = 0;

	if (queue->do_pack) {
		if (atomic_read(&queue->used_buffers)
		    <= QETH_LOW_WATERMARK_PACK) {
			/* switch PACKING -> non-PACKING */
			QETH_DBF_TEXT(TRACE, 6, "pack->np");
			if (queue->card->options.performance_stats)
				queue->card->perf_stats.sc_p_dp++;
			queue->do_pack = 0;
			/* flush packing buffers */
			buffer = &queue->bufs[queue->next_buf_to_fill];
			/* a partially filled (next_element_to_fill > 0) but
			 * still EMPTY buffer is a half-done packing buffer:
			 * prime it so it gets flushed */
			if ((atomic_read(&buffer->state) ==
						QETH_QDIO_BUF_EMPTY) &&
			    (buffer->next_element_to_fill > 0)) {
				atomic_set(&buffer->state,
						QETH_QDIO_BUF_PRIMED);
				flush_count++;
				queue->next_buf_to_fill =
					(queue->next_buf_to_fill + 1) %
					QDIO_MAX_BUFFERS_PER_Q;
			}
		}
	}
	return flush_count;
}
2812 * Called to flush a packing buffer if no more pci flags are on the queue.
2813 * Checks if there is a packing buffer and prepares it to be flushed.
2814 * In that case returns 1, otherwise zero.
2816 static int qeth_flush_buffers_on_no_pci(struct qeth_qdio_out_q *queue)
2818 struct qeth_qdio_out_buffer *buffer;
2820 buffer = &queue->bufs[queue->next_buf_to_fill];
2821 if ((atomic_read(&buffer->state) == QETH_QDIO_BUF_EMPTY) &&
2822 (buffer->next_element_to_fill > 0)) {
2823 /* it's a packing buffer */
2824 atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
2825 queue->next_buf_to_fill =
2826 (queue->next_buf_to_fill + 1) % QDIO_MAX_BUFFERS_PER_Q;
2827 return 1;
2829 return 0;
/*
 * Hand 'count' outbound buffers starting at ring position 'index' to
 * the hardware via do_QDIO.
 *
 * For non-HiperSockets cards a PCI request flag (0x40 in element 0) is
 * set on a buffer when it is likely needed: either packing mode is
 * about to start, or packing mode is active and no PCI is outstanding.
 * SIGA results are recorded in queue->sync_iqdio_error; hard errors
 * trigger recovery.
 */
static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
			int count)
{
	struct qeth_qdio_out_buffer *buf;
	int rc;
	int i;
	unsigned int qdio_flags;

	for (i = index; i < index + count; ++i) {
		buf = &queue->bufs[i % QDIO_MAX_BUFFERS_PER_Q];
		/* mark the final used element of each buffer */
		buf->buffer->element[buf->next_element_to_fill - 1].flags |=
				SBAL_FLAGS_LAST_ENTRY;

		if (queue->card->info.type == QETH_CARD_TYPE_IQD)
			continue;

		if (!queue->do_pack) {
			if ((atomic_read(&queue->used_buffers) >=
				(QETH_HIGH_WATERMARK_PACK -
				 QETH_WATERMARK_PACK_FUZZ)) &&
			    !atomic_read(&queue->set_pci_flags_count)) {
				/* it's likely that we'll go to packing
				 * mode soon */
				atomic_inc(&queue->set_pci_flags_count);
				buf->buffer->element[0].flags |= 0x40;
			}
		} else {
			if (!atomic_read(&queue->set_pci_flags_count)) {
				/*
				 * there's no outstanding PCI any more, so we
				 * have to request a PCI to be sure the PCI
				 * will wake at some time in the future then we
				 * can flush packed buffers that might still be
				 * hanging around, which can happen if no
				 * further send was requested by the stack
				 */
				atomic_inc(&queue->set_pci_flags_count);
				buf->buffer->element[0].flags |= 0x40;
			}
		}
	}

	queue->sync_iqdio_error = 0;
	queue->card->dev->trans_start = jiffies;
	if (queue->card->options.performance_stats) {
		queue->card->perf_stats.outbound_do_qdio_cnt++;
		queue->card->perf_stats.outbound_do_qdio_start_time =
			qeth_get_micros();
	}
	qdio_flags = QDIO_FLAG_SYNC_OUTPUT;
	if (atomic_read(&queue->set_pci_flags_count))
		qdio_flags |= QDIO_FLAG_PCI_OUT;
	rc = do_QDIO(CARD_DDEV(queue->card), qdio_flags,
		     queue->queue_no, index, count);
	if (queue->card->options.performance_stats)
		queue->card->perf_stats.outbound_do_qdio_time +=
			qeth_get_micros() -
			queue->card->perf_stats.outbound_do_qdio_start_time;
	if (rc > 0) {
		/* positive rc carries the SIGA condition code in bits 0/1 */
		if (!(rc & QDIO_ERROR_SIGA_BUSY))
			queue->sync_iqdio_error = rc & 3;
	}
	if (rc) {
		queue->card->stats.tx_errors += count;
		/* ignore temporary SIGA errors without busy condition */
		if (rc == QDIO_ERROR_SIGA_TARGET)
			return;
		QETH_DBF_TEXT(TRACE, 2, "flushbuf");
		QETH_DBF_TEXT_(TRACE, 2, " err%d", rc);
		QETH_DBF_TEXT_(TRACE, 2, "%s", CARD_DDEV_ID(queue->card));

		/* this must not happen under normal circumstances. if it
		 * happens something is really wrong -> recover */
		qeth_schedule_recovery(queue->card);
		return;
	}
	atomic_add(count, &queue->used_buffers);
	if (queue->card->options.performance_stats)
		queue->card->perf_stats.bufs_sent += count;
}
/*
 * Post-completion housekeeping for an outbound queue: when the queue
 * can be taken (UNLOCKED -> LOCKED_FLUSH), possibly leave packing mode
 * and/or flush a pending packing buffer to get a PCI flag out.
 */
static void qeth_check_outbound_queue(struct qeth_qdio_out_q *queue)
{
	int index;
	int flush_cnt = 0;
	int q_was_packing = 0;

	/*
	 * check if we have to switch to non-packing mode or if
	 * we have to get a pci flag out on the queue
	 */
	if ((atomic_read(&queue->used_buffers) <= QETH_LOW_WATERMARK_PACK) ||
	    !atomic_read(&queue->set_pci_flags_count)) {
		if (atomic_xchg(&queue->state, QETH_OUT_Q_LOCKED_FLUSH) ==
				QETH_OUT_Q_UNLOCKED) {
			/*
			 * If we get in here, there was no action in
			 * do_send_packet. So, we check if there is a
			 * packing buffer to be flushed here.
			 */
			netif_stop_queue(queue->card->dev);
			index = queue->next_buf_to_fill;
			q_was_packing = queue->do_pack;
			/* queue->do_pack may change */
			barrier();
			flush_cnt += qeth_switch_to_nonpacking_if_needed(queue);
			if (!flush_cnt &&
			    !atomic_read(&queue->set_pci_flags_count))
				flush_cnt +=
					qeth_flush_buffers_on_no_pci(queue);
			if (queue->card->options.performance_stats &&
			    q_was_packing)
				queue->card->perf_stats.bufs_sent_pack +=
					flush_cnt;
			if (flush_cnt)
				qeth_flush_buffers(queue, index, flush_cnt);
			atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
		}
	}
}
/*
 * QDIO outbound completion handler (runs in qdio interrupt context).
 * Classifies per-buffer send errors, releases the completed buffers,
 * performs queue housekeeping for non-IQD cards and wakes the netdev
 * transmit queue.  On an ACTIVATE check condition the device is
 * stopped and recovery is scheduled.
 */
void qeth_qdio_output_handler(struct ccw_device *ccwdev,
		unsigned int qdio_error, int __queue, int first_element,
		int count, unsigned long card_ptr)
{
	struct qeth_card *card = (struct qeth_card *) card_ptr;
	struct qeth_qdio_out_q *queue = card->qdio.out_qs[__queue];
	struct qeth_qdio_out_buffer *buffer;
	int i;
	unsigned qeth_send_err;

	QETH_DBF_TEXT(TRACE, 6, "qdouhdl");
	if (qdio_error & QDIO_ERROR_ACTIVATE_CHECK_CONDITION) {
		QETH_DBF_TEXT(TRACE, 2, "achkcond");
		QETH_DBF_TEXT_(TRACE, 2, "%s", CARD_BUS_ID(card));
		netif_stop_queue(card->dev);
		qeth_schedule_recovery(card);
		return;
	}
	if (card->options.performance_stats) {
		card->perf_stats.outbound_handler_cnt++;
		card->perf_stats.outbound_handler_start_time =
			qeth_get_micros();
	}
	for (i = first_element; i < (first_element + count); ++i) {
		buffer = &queue->bufs[i % QDIO_MAX_BUFFERS_PER_Q];
		qeth_send_err = qeth_handle_send_error(card, buffer, qdio_error);
		/* on RETRY keep the skbs queued for retransmission */
		__qeth_clear_output_buffer(queue, buffer,
			(qeth_send_err == QETH_SEND_ERROR_RETRY) ? 1 : 0);
	}
	atomic_sub(count, &queue->used_buffers);
	/* check if we need to do something on this outbound queue */
	if (card->info.type != QETH_CARD_TYPE_IQD)
		qeth_check_outbound_queue(queue);

	netif_wake_queue(queue->card->dev);
	if (card->options.performance_stats)
		card->perf_stats.outbound_handler_time += qeth_get_micros() -
			card->perf_stats.outbound_handler_start_time;
}
EXPORT_SYMBOL_GPL(qeth_qdio_output_handler);
/*
 * Select the outbound queue for a packet.
 *
 * Non-IP traffic on OSD/OSX cards always uses the default queue.  On
 * four-queue cards, multicast may be steered by is_multicast_different,
 * and IPv4 priority queueing maps either individual TOS bits or the
 * 2-bit IP precedence field to a queue number (higher precedence ->
 * lower queue number).  Everything else falls back to the default
 * queue.
 */
int qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb,
			int ipv, int cast_type)
{
	if (!ipv && (card->info.type == QETH_CARD_TYPE_OSD ||
		     card->info.type == QETH_CARD_TYPE_OSX))
		return card->qdio.default_out_queue;
	switch (card->qdio.no_out_queues) {
	case 4:
		if (cast_type && card->info.is_multicast_different)
			/* mask keeps the result inside the queue range */
			return card->info.is_multicast_different &
				(card->qdio.no_out_queues - 1);
		if (card->qdio.do_prio_queueing && (ipv == 4)) {
			const u8 tos = ip_hdr(skb)->tos;

			if (card->qdio.do_prio_queueing ==
				QETH_PRIO_Q_ING_TOS) {
				if (tos & IP_TOS_NOTIMPORTANT)
					return 3;
				if (tos & IP_TOS_HIGHRELIABILITY)
					return 2;
				if (tos & IP_TOS_HIGHTHROUGHPUT)
					return 1;
				if (tos & IP_TOS_LOWDELAY)
					return 0;
			}
			if (card->qdio.do_prio_queueing ==
				QETH_PRIO_Q_ING_PREC)
				/* precedence 0..3 -> queue 3..0 */
				return 3 - (tos >> 6);
		} else if (card->qdio.do_prio_queueing && (ipv == 6)) {
			/* TODO: IPv6!!! */
		}
		return card->qdio.default_out_queue;
	case 1: /* fallthrough for single-out-queue 1920-device */
	default:
		return card->qdio.default_out_queue;
	}
}
EXPORT_SYMBOL_GPL(qeth_get_priority_queue);
3033 int qeth_get_elements_no(struct qeth_card *card, void *hdr,
3034 struct sk_buff *skb, int elems)
3036 int elements_needed = 0;
3038 if (skb_shinfo(skb)->nr_frags > 0)
3039 elements_needed = (skb_shinfo(skb)->nr_frags + 1);
3040 if (elements_needed == 0)
3041 elements_needed = 1 + (((((unsigned long) skb->data) %
3042 PAGE_SIZE) + skb->len) >> PAGE_SHIFT);
3043 if ((elements_needed + elems) > QETH_MAX_BUFFER_ELEMENTS(card)) {
3044 QETH_DBF_MESSAGE(2, "Invalid size of IP packet "
3045 "(Number=%d / Length=%d). Discarded.\n",
3046 (elements_needed+elems), skb->len);
3047 return 0;
3049 return elements_needed;
3051 EXPORT_SYMBOL_GPL(qeth_get_elements_no);
/*
 * Map the linear data of an skb into consecutive SBAL elements, one per
 * page crossing, starting at *next_element_to_fill (updated on return).
 *
 * Fragment flags: a single-element, non-TSO packet gets flags 0; the
 * first element of a multi-element packet gets FIRST_FRAG, the last
 * LAST_FRAG, everything in between MIDDLE_FRAG.  For TSO (is_tso) or
 * when a header was placed separately (offset >= 0) the first data
 * element is never flagged FIRST_FRAG, since the header element
 * already carries it.
 */
static inline void __qeth_fill_buffer(struct sk_buff *skb,
	struct qdio_buffer *buffer, int is_tso, int *next_element_to_fill,
	int offset)
{
	int length = skb->len;
	int length_here;
	int element;
	char *data;
	int first_lap;

	element = *next_element_to_fill;
	data = skb->data;
	first_lap = (is_tso == 0 ? 1 : 0);

	if (offset >= 0) {
		/* header already consumed 'offset' bytes of the skb */
		data = skb->data + offset;
		length -= offset;
		first_lap = 0;
	}

	while (length > 0) {
		/* length_here is the remaining amount of data in this page */
		length_here = PAGE_SIZE - ((unsigned long) data % PAGE_SIZE);
		if (length < length_here)
			length_here = length;

		buffer->element[element].addr = data;
		buffer->element[element].length = length_here;
		length -= length_here;
		if (!length) {
			if (first_lap)
				buffer->element[element].flags = 0;
			else
				buffer->element[element].flags =
				    SBAL_FLAGS_LAST_FRAG;
		} else {
			if (first_lap)
				buffer->element[element].flags =
				    SBAL_FLAGS_FIRST_FRAG;
			else
				buffer->element[element].flags =
				    SBAL_FLAGS_MIDDLE_FRAG;
		}
		data += length_here;
		element++;
		first_lap = 0;
	}
	*next_element_to_fill = element;
}
/*
 * Place one skb into an outbound QDIO buffer.
 *
 * The skb is reference-counted and queued on the buffer's skb_list so
 * it survives until transmit completion.  For TSO packets the TSO
 * header is put into its own first element; if 'offset' >= 0 the
 * qeth_hdr is placed as a separate header element of hd_len extra
 * bytes.  Linear or fragmented data is then mapped by the
 * __qeth_fill_buffer helpers.
 *
 * Returns 1 if the buffer was primed (non-packing mode, or a packing
 * buffer that became full) and must be flushed, 0 otherwise.
 */
static inline int qeth_fill_buffer(struct qeth_qdio_out_q *queue,
		struct qeth_qdio_out_buffer *buf, struct sk_buff *skb,
		struct qeth_hdr *hdr, int offset, int hd_len)
{
	struct qdio_buffer *buffer;
	int flush_cnt = 0, hdr_len, large_send = 0;

	buffer = buf->buffer;
	/* hold the skb until the completion handler releases it */
	atomic_inc(&skb->users);
	skb_queue_tail(&buf->skb_list, skb);

	/*check first on TSO ....*/
	if (hdr->hdr.l3.id == QETH_HEADER_TYPE_TSO) {
		int element = buf->next_element_to_fill;

		hdr_len = sizeof(struct qeth_hdr_tso) +
			((struct qeth_hdr_tso *)hdr)->ext.dg_hdr_len;
		/*fill first buffer entry only with header information */
		buffer->element[element].addr = skb->data;
		buffer->element[element].length = hdr_len;
		buffer->element[element].flags = SBAL_FLAGS_FIRST_FRAG;
		buf->next_element_to_fill++;
		/* advance past the header so the data mapping below
		 * starts at the payload */
		skb->data += hdr_len;
		skb->len -= hdr_len;
		large_send = 1;
	}

	if (offset >= 0) {
		int element = buf->next_element_to_fill;
		buffer->element[element].addr = hdr;
		buffer->element[element].length = sizeof(struct qeth_hdr) +
							hd_len;
		buffer->element[element].flags = SBAL_FLAGS_FIRST_FRAG;
		/* remember that this element holds a header, not skb data */
		buf->is_header[element] = 1;
		buf->next_element_to_fill++;
	}

	if (skb_shinfo(skb)->nr_frags == 0)
		__qeth_fill_buffer(skb, buffer, large_send,
				   (int *)&buf->next_element_to_fill, offset);
	else
		__qeth_fill_buffer_frag(skb, buffer, large_send,
					(int *)&buf->next_element_to_fill);

	if (!queue->do_pack) {
		QETH_DBF_TEXT(TRACE, 6, "fillbfnp");
		/* set state to PRIMED -> will be flushed */
		atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED);
		flush_cnt = 1;
	} else {
		QETH_DBF_TEXT(TRACE, 6, "fillbfpa");
		if (queue->card->options.performance_stats)
			queue->card->perf_stats.skbs_sent_pack++;
		if (buf->next_element_to_fill >=
				QETH_MAX_BUFFER_ELEMENTS(queue->card)) {
			/*
			 * packed buffer if full -> set state PRIMED
			 * -> will be flushed
			 */
			atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED);
			flush_cnt = 1;
		}
	}
	return flush_cnt;
}
/*
 * Fast transmit path (non-packing): take the queue lock by spinning on
 * its atomic state, claim the next empty buffer, fill and flush it
 * immediately.
 *
 * If the SIGA reported condition code 2 (sync_iqdio_error == 2), the
 * buffer's skbs are unwound and the packet is retried through
 * dev_queue_xmit() up to QETH_SIGA_CC2_RETRIES times, using retry state
 * kept in skb->cb[16..]; after that it is dropped.
 *
 * Returns 0 on success, -EBUSY if the next buffer was not empty.
 */
int qeth_do_send_packet_fast(struct qeth_card *card,
		struct qeth_qdio_out_q *queue, struct sk_buff *skb,
		struct qeth_hdr *hdr, int elements_needed,
		int offset, int hd_len)
{
	struct qeth_qdio_out_buffer *buffer;
	struct sk_buff *skb1;
	struct qeth_skb_data *retry_ctrl;
	int index;
	int rc;

	/* spin until we get the queue ... */
	while (atomic_cmpxchg(&queue->state, QETH_OUT_Q_UNLOCKED,
			      QETH_OUT_Q_LOCKED) != QETH_OUT_Q_UNLOCKED);
	/* ... now we've got the queue */
	index = queue->next_buf_to_fill;
	buffer = &queue->bufs[queue->next_buf_to_fill];
	/*
	 * check if buffer is empty to make sure that we do not 'overtake'
	 * ourselves and try to fill a buffer that is already primed
	 */
	if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY)
		goto out;
	queue->next_buf_to_fill = (queue->next_buf_to_fill + 1) %
				  QDIO_MAX_BUFFERS_PER_Q;
	/* unlock before the (potentially slow) fill/flush */
	atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
	qeth_fill_buffer(queue, buffer, skb, hdr, offset, hd_len);
	qeth_flush_buffers(queue, index, 1);
	if (queue->sync_iqdio_error == 2) {
		/* drop the buffer's references; the skb is re-sent below */
		skb1 = skb_dequeue(&buffer->skb_list);
		while (skb1) {
			atomic_dec(&skb1->users);
			skb1 = skb_dequeue(&buffer->skb_list);
		}
		retry_ctrl = (struct qeth_skb_data *) &skb->cb[16];
		if (retry_ctrl->magic != QETH_SKB_MAGIC) {
			/* first retry for this skb: initialize the state */
			retry_ctrl->magic = QETH_SKB_MAGIC;
			retry_ctrl->count = 0;
		}
		if (retry_ctrl->count < QETH_SIGA_CC2_RETRIES) {
			retry_ctrl->count++;
			rc = dev_queue_xmit(skb);
		} else {
			dev_kfree_skb_any(skb);
			QETH_DBF_TEXT(QERR, 2, "qrdrop");
		}
	}
	return 0;
out:
	atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
	return -EBUSY;
}
EXPORT_SYMBOL_GPL(qeth_do_send_packet_fast);
3223 int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue,
3224 struct sk_buff *skb, struct qeth_hdr *hdr,
3225 int elements_needed)
3227 struct qeth_qdio_out_buffer *buffer;
3228 int start_index;
3229 int flush_count = 0;
3230 int do_pack = 0;
3231 int tmp;
3232 int rc = 0;
3234 /* spin until we get the queue ... */
3235 while (atomic_cmpxchg(&queue->state, QETH_OUT_Q_UNLOCKED,
3236 QETH_OUT_Q_LOCKED) != QETH_OUT_Q_UNLOCKED);
3237 start_index = queue->next_buf_to_fill;
3238 buffer = &queue->bufs[queue->next_buf_to_fill];
3240 * check if buffer is empty to make sure that we do not 'overtake'
3241 * ourselves and try to fill a buffer that is already primed
3243 if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY) {
3244 atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
3245 return -EBUSY;
3247 /* check if we need to switch packing state of this queue */
3248 qeth_switch_to_packing_if_needed(queue);
3249 if (queue->do_pack) {
3250 do_pack = 1;
3251 /* does packet fit in current buffer? */
3252 if ((QETH_MAX_BUFFER_ELEMENTS(card) -
3253 buffer->next_element_to_fill) < elements_needed) {
3254 /* ... no -> set state PRIMED */
3255 atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
3256 flush_count++;
3257 queue->next_buf_to_fill =
3258 (queue->next_buf_to_fill + 1) %
3259 QDIO_MAX_BUFFERS_PER_Q;
3260 buffer = &queue->bufs[queue->next_buf_to_fill];
3261 /* we did a step forward, so check buffer state
3262 * again */
3263 if (atomic_read(&buffer->state) !=
3264 QETH_QDIO_BUF_EMPTY) {
3265 qeth_flush_buffers(queue, start_index,
3266 flush_count);
3267 atomic_set(&queue->state,
3268 QETH_OUT_Q_UNLOCKED);
3269 return -EBUSY;
3273 tmp = qeth_fill_buffer(queue, buffer, skb, hdr, -1, 0);
3274 queue->next_buf_to_fill = (queue->next_buf_to_fill + tmp) %
3275 QDIO_MAX_BUFFERS_PER_Q;
3276 flush_count += tmp;
3277 if (flush_count)
3278 qeth_flush_buffers(queue, start_index, flush_count);
3279 else if (!atomic_read(&queue->set_pci_flags_count))
3280 atomic_xchg(&queue->state, QETH_OUT_Q_LOCKED_FLUSH);
3282 * queue->state will go from LOCKED -> UNLOCKED or from
3283 * LOCKED_FLUSH -> LOCKED if output_handler wanted to 'notify' us
3284 * (switch packing state or flush buffer to get another pci flag out).
3285 * In that case we will enter this loop
3287 while (atomic_dec_return(&queue->state)) {
3288 flush_count = 0;
3289 start_index = queue->next_buf_to_fill;
3290 /* check if we can go back to non-packing state */
3291 flush_count += qeth_switch_to_nonpacking_if_needed(queue);
3293 * check if we need to flush a packing buffer to get a pci
3294 * flag out on the queue
3296 if (!flush_count && !atomic_read(&queue->set_pci_flags_count))
3297 flush_count += qeth_flush_buffers_on_no_pci(queue);
3298 if (flush_count)
3299 qeth_flush_buffers(queue, start_index, flush_count);
3301 /* at this point the queue is UNLOCKED again */
3302 if (queue->card->options.performance_stats && do_pack)
3303 queue->card->perf_stats.bufs_sent_pack += flush_count;
3305 return rc;
3307 EXPORT_SYMBOL_GPL(qeth_do_send_packet);
3309 static int qeth_setadp_promisc_mode_cb(struct qeth_card *card,
3310 struct qeth_reply *reply, unsigned long data)
3312 struct qeth_ipa_cmd *cmd;
3313 struct qeth_ipacmd_setadpparms *setparms;
3315 QETH_DBF_TEXT(TRACE, 4, "prmadpcb");
3317 cmd = (struct qeth_ipa_cmd *) data;
3318 setparms = &(cmd->data.setadapterparms);
3320 qeth_default_setadapterparms_cb(card, reply, (unsigned long)cmd);
3321 if (cmd->hdr.return_code) {
3322 QETH_DBF_TEXT_(TRACE, 4, "prmrc%2.2x", cmd->hdr.return_code);
3323 setparms->data.mode = SET_PROMISC_MODE_OFF;
3325 card->info.promisc_mode = setparms->data.mode;
3326 return 0;
3329 void qeth_setadp_promisc_mode(struct qeth_card *card)
3331 enum qeth_ipa_promisc_modes mode;
3332 struct net_device *dev = card->dev;
3333 struct qeth_cmd_buffer *iob;
3334 struct qeth_ipa_cmd *cmd;
3336 QETH_DBF_TEXT(TRACE, 4, "setprom");
3338 if (((dev->flags & IFF_PROMISC) &&
3339 (card->info.promisc_mode == SET_PROMISC_MODE_ON)) ||
3340 (!(dev->flags & IFF_PROMISC) &&
3341 (card->info.promisc_mode == SET_PROMISC_MODE_OFF)))
3342 return;
3343 mode = SET_PROMISC_MODE_OFF;
3344 if (dev->flags & IFF_PROMISC)
3345 mode = SET_PROMISC_MODE_ON;
3346 QETH_DBF_TEXT_(TRACE, 4, "mode:%x", mode);
3348 iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_PROMISC_MODE,
3349 sizeof(struct qeth_ipacmd_setadpparms));
3350 cmd = (struct qeth_ipa_cmd *)(iob->data + IPA_PDU_HEADER_SIZE);
3351 cmd->data.setadapterparms.data.mode = mode;
3352 qeth_send_ipa_cmd(card, iob, qeth_setadp_promisc_mode_cb, NULL);
3354 EXPORT_SYMBOL_GPL(qeth_setadp_promisc_mode);
3356 int qeth_change_mtu(struct net_device *dev, int new_mtu)
3358 struct qeth_card *card;
3359 char dbf_text[15];
3361 card = dev->ml_priv;
3363 QETH_DBF_TEXT(TRACE, 4, "chgmtu");
3364 sprintf(dbf_text, "%8x", new_mtu);
3365 QETH_DBF_TEXT(TRACE, 4, dbf_text);
3367 if (new_mtu < 64)
3368 return -EINVAL;
3369 if (new_mtu > 65535)
3370 return -EINVAL;
3371 if ((!qeth_is_supported(card, IPA_IP_FRAGMENTATION)) &&
3372 (!qeth_mtu_is_valid(card, new_mtu)))
3373 return -EINVAL;
3374 dev->mtu = new_mtu;
3375 return 0;
3377 EXPORT_SYMBOL_GPL(qeth_change_mtu);
3379 struct net_device_stats *qeth_get_stats(struct net_device *dev)
3381 struct qeth_card *card;
3383 card = dev->ml_priv;
3385 QETH_DBF_TEXT(TRACE, 5, "getstat");
3387 return &card->stats;
3389 EXPORT_SYMBOL_GPL(qeth_get_stats);
3391 static int qeth_setadpparms_change_macaddr_cb(struct qeth_card *card,
3392 struct qeth_reply *reply, unsigned long data)
3394 struct qeth_ipa_cmd *cmd;
3396 QETH_DBF_TEXT(TRACE, 4, "chgmaccb");
3398 cmd = (struct qeth_ipa_cmd *) data;
3399 if (!card->options.layer2 ||
3400 !(card->info.mac_bits & QETH_LAYER2_MAC_READ)) {
3401 memcpy(card->dev->dev_addr,
3402 &cmd->data.setadapterparms.data.change_addr.addr,
3403 OSA_ADDR_LEN);
3404 card->info.mac_bits |= QETH_LAYER2_MAC_READ;
3406 qeth_default_setadapterparms_cb(card, reply, (unsigned long) cmd);
3407 return 0;
3410 int qeth_setadpparms_change_macaddr(struct qeth_card *card)
3412 int rc;
3413 struct qeth_cmd_buffer *iob;
3414 struct qeth_ipa_cmd *cmd;
3416 QETH_DBF_TEXT(TRACE, 4, "chgmac");
3418 iob = qeth_get_adapter_cmd(card, IPA_SETADP_ALTER_MAC_ADDRESS,
3419 sizeof(struct qeth_ipacmd_setadpparms));
3420 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
3421 cmd->data.setadapterparms.data.change_addr.cmd = CHANGE_ADDR_READ_MAC;
3422 cmd->data.setadapterparms.data.change_addr.addr_size = OSA_ADDR_LEN;
3423 memcpy(&cmd->data.setadapterparms.data.change_addr.addr,
3424 card->dev->dev_addr, OSA_ADDR_LEN);
3425 rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_change_macaddr_cb,
3426 NULL);
3427 return rc;
3429 EXPORT_SYMBOL_GPL(qeth_setadpparms_change_macaddr);
3431 static int qeth_setadpparms_set_access_ctrl_cb(struct qeth_card *card,
3432 struct qeth_reply *reply, unsigned long data)
3434 struct qeth_ipa_cmd *cmd;
3435 struct qeth_set_access_ctrl *access_ctrl_req;
3436 int rc;
3438 QETH_DBF_TEXT(TRACE, 4, "setaccb");
3440 cmd = (struct qeth_ipa_cmd *) data;
3441 access_ctrl_req = &cmd->data.setadapterparms.data.set_access_ctrl;
3442 QETH_DBF_TEXT_(SETUP, 2, "setaccb");
3443 QETH_DBF_TEXT_(SETUP, 2, "%s", card->gdev->dev.kobj.name);
3444 QETH_DBF_TEXT_(SETUP, 2, "rc=%d",
3445 cmd->data.setadapterparms.hdr.return_code);
3446 switch (cmd->data.setadapterparms.hdr.return_code) {
3447 case SET_ACCESS_CTRL_RC_SUCCESS:
3448 case SET_ACCESS_CTRL_RC_ALREADY_NOT_ISOLATED:
3449 case SET_ACCESS_CTRL_RC_ALREADY_ISOLATED:
3451 card->options.isolation = access_ctrl_req->subcmd_code;
3452 if (card->options.isolation == ISOLATION_MODE_NONE) {
3453 dev_info(&card->gdev->dev,
3454 "QDIO data connection isolation is deactivated\n");
3455 } else {
3456 dev_info(&card->gdev->dev,
3457 "QDIO data connection isolation is activated\n");
3459 QETH_DBF_MESSAGE(3, "OK:SET_ACCESS_CTRL(%s, %d)==%d\n",
3460 card->gdev->dev.kobj.name,
3461 access_ctrl_req->subcmd_code,
3462 cmd->data.setadapterparms.hdr.return_code);
3463 rc = 0;
3464 break;
3466 case SET_ACCESS_CTRL_RC_NOT_SUPPORTED:
3468 QETH_DBF_MESSAGE(3, "ERR:SET_ACCESS_CTRL(%s,%d)==%d\n",
3469 card->gdev->dev.kobj.name,
3470 access_ctrl_req->subcmd_code,
3471 cmd->data.setadapterparms.hdr.return_code);
3472 dev_err(&card->gdev->dev, "Adapter does not "
3473 "support QDIO data connection isolation\n");
3475 /* ensure isolation mode is "none" */
3476 card->options.isolation = ISOLATION_MODE_NONE;
3477 rc = -EOPNOTSUPP;
3478 break;
3480 case SET_ACCESS_CTRL_RC_NONE_SHARED_ADAPTER:
3482 QETH_DBF_MESSAGE(3, "ERR:SET_ACCESS_MODE(%s,%d)==%d\n",
3483 card->gdev->dev.kobj.name,
3484 access_ctrl_req->subcmd_code,
3485 cmd->data.setadapterparms.hdr.return_code);
3486 dev_err(&card->gdev->dev,
3487 "Adapter is dedicated. "
3488 "QDIO data connection isolation not supported\n");
3490 /* ensure isolation mode is "none" */
3491 card->options.isolation = ISOLATION_MODE_NONE;
3492 rc = -EOPNOTSUPP;
3493 break;
3495 case SET_ACCESS_CTRL_RC_ACTIVE_CHECKSUM_OFF:
3497 QETH_DBF_MESSAGE(3, "ERR:SET_ACCESS_MODE(%s,%d)==%d\n",
3498 card->gdev->dev.kobj.name,
3499 access_ctrl_req->subcmd_code,
3500 cmd->data.setadapterparms.hdr.return_code);
3501 dev_err(&card->gdev->dev,
3502 "TSO does not permit QDIO data connection isolation\n");
3504 /* ensure isolation mode is "none" */
3505 card->options.isolation = ISOLATION_MODE_NONE;
3506 rc = -EPERM;
3507 break;
3509 default:
3511 /* this should never happen */
3512 QETH_DBF_MESSAGE(3, "ERR:SET_ACCESS_MODE(%s,%d)==%d"
3513 "==UNKNOWN\n",
3514 card->gdev->dev.kobj.name,
3515 access_ctrl_req->subcmd_code,
3516 cmd->data.setadapterparms.hdr.return_code);
3518 /* ensure isolation mode is "none" */
3519 card->options.isolation = ISOLATION_MODE_NONE;
3520 rc = 0;
3521 break;
3524 qeth_default_setadapterparms_cb(card, reply, (unsigned long) cmd);
3525 return rc;
3528 static int qeth_setadpparms_set_access_ctrl(struct qeth_card *card,
3529 enum qeth_ipa_isolation_modes isolation)
3531 int rc;
3532 struct qeth_cmd_buffer *iob;
3533 struct qeth_ipa_cmd *cmd;
3534 struct qeth_set_access_ctrl *access_ctrl_req;
3536 QETH_DBF_TEXT(TRACE, 4, "setacctl");
3538 QETH_DBF_TEXT_(SETUP, 2, "setacctl");
3539 QETH_DBF_TEXT_(SETUP, 2, "%s", card->gdev->dev.kobj.name);
3541 iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_ACCESS_CONTROL,
3542 sizeof(struct qeth_ipacmd_setadpparms_hdr) +
3543 sizeof(struct qeth_set_access_ctrl));
3544 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
3545 access_ctrl_req = &cmd->data.setadapterparms.data.set_access_ctrl;
3546 access_ctrl_req->subcmd_code = isolation;
3548 rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_set_access_ctrl_cb,
3549 NULL);
3550 QETH_DBF_TEXT_(SETUP, 2, "rc=%d", rc);
3551 return rc;
3554 int qeth_set_access_ctrl_online(struct qeth_card *card)
3556 int rc = 0;
3558 QETH_DBF_TEXT(TRACE, 4, "setactlo");
3560 if ((card->info.type == QETH_CARD_TYPE_OSD ||
3561 card->info.type == QETH_CARD_TYPE_OSX) &&
3562 qeth_adp_supported(card, IPA_SETADP_SET_ACCESS_CONTROL)) {
3563 rc = qeth_setadpparms_set_access_ctrl(card,
3564 card->options.isolation);
3565 if (rc) {
3566 QETH_DBF_MESSAGE(3,
3567 "IPA(SET_ACCESS_CTRL,%s,%d) sent failed\n",
3568 card->gdev->dev.kobj.name,
3569 rc);
3571 } else if (card->options.isolation != ISOLATION_MODE_NONE) {
3572 card->options.isolation = ISOLATION_MODE_NONE;
3574 dev_err(&card->gdev->dev, "Adapter does not "
3575 "support QDIO data connection isolation\n");
3576 rc = -EOPNOTSUPP;
3578 return rc;
3580 EXPORT_SYMBOL_GPL(qeth_set_access_ctrl_online);
3582 void qeth_tx_timeout(struct net_device *dev)
3584 struct qeth_card *card;
3586 QETH_DBF_TEXT(TRACE, 4, "txtimeo");
3587 card = dev->ml_priv;
3588 card->stats.tx_errors++;
3589 qeth_schedule_recovery(card);
3591 EXPORT_SYMBOL_GPL(qeth_tx_timeout);
3593 int qeth_mdio_read(struct net_device *dev, int phy_id, int regnum)
3595 struct qeth_card *card = dev->ml_priv;
3596 int rc = 0;
3598 switch (regnum) {
3599 case MII_BMCR: /* Basic mode control register */
3600 rc = BMCR_FULLDPLX;
3601 if ((card->info.link_type != QETH_LINK_TYPE_GBIT_ETH) &&
3602 (card->info.link_type != QETH_LINK_TYPE_OSN) &&
3603 (card->info.link_type != QETH_LINK_TYPE_10GBIT_ETH))
3604 rc |= BMCR_SPEED100;
3605 break;
3606 case MII_BMSR: /* Basic mode status register */
3607 rc = BMSR_ERCAP | BMSR_ANEGCOMPLETE | BMSR_LSTATUS |
3608 BMSR_10HALF | BMSR_10FULL | BMSR_100HALF | BMSR_100FULL |
3609 BMSR_100BASE4;
3610 break;
3611 case MII_PHYSID1: /* PHYS ID 1 */
3612 rc = (dev->dev_addr[0] << 16) | (dev->dev_addr[1] << 8) |
3613 dev->dev_addr[2];
3614 rc = (rc >> 5) & 0xFFFF;
3615 break;
3616 case MII_PHYSID2: /* PHYS ID 2 */
3617 rc = (dev->dev_addr[2] << 10) & 0xFFFF;
3618 break;
3619 case MII_ADVERTISE: /* Advertisement control reg */
3620 rc = ADVERTISE_ALL;
3621 break;
3622 case MII_LPA: /* Link partner ability reg */
3623 rc = LPA_10HALF | LPA_10FULL | LPA_100HALF | LPA_100FULL |
3624 LPA_100BASE4 | LPA_LPACK;
3625 break;
3626 case MII_EXPANSION: /* Expansion register */
3627 break;
3628 case MII_DCOUNTER: /* disconnect counter */
3629 break;
3630 case MII_FCSCOUNTER: /* false carrier counter */
3631 break;
3632 case MII_NWAYTEST: /* N-way auto-neg test register */
3633 break;
3634 case MII_RERRCOUNTER: /* rx error counter */
3635 rc = card->stats.rx_errors;
3636 break;
3637 case MII_SREVISION: /* silicon revision */
3638 break;
3639 case MII_RESV1: /* reserved 1 */
3640 break;
3641 case MII_LBRERROR: /* loopback, rx, bypass error */
3642 break;
3643 case MII_PHYADDR: /* physical address */
3644 break;
3645 case MII_RESV2: /* reserved 2 */
3646 break;
3647 case MII_TPISTATUS: /* TPI status for 10mbps */
3648 break;
3649 case MII_NCONFIG: /* network interface config */
3650 break;
3651 default:
3652 break;
3654 return rc;
3656 EXPORT_SYMBOL_GPL(qeth_mdio_read);
3658 static int qeth_send_ipa_snmp_cmd(struct qeth_card *card,
3659 struct qeth_cmd_buffer *iob, int len,
3660 int (*reply_cb)(struct qeth_card *, struct qeth_reply *,
3661 unsigned long),
3662 void *reply_param)
3664 u16 s1, s2;
3666 QETH_DBF_TEXT(TRACE, 4, "sendsnmp");
3668 memcpy(iob->data, IPA_PDU_HEADER, IPA_PDU_HEADER_SIZE);
3669 memcpy(QETH_IPA_CMD_DEST_ADDR(iob->data),
3670 &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH);
3671 /* adjust PDU length fields in IPA_PDU_HEADER */
3672 s1 = (u32) IPA_PDU_HEADER_SIZE + len;
3673 s2 = (u32) len;
3674 memcpy(QETH_IPA_PDU_LEN_TOTAL(iob->data), &s1, 2);
3675 memcpy(QETH_IPA_PDU_LEN_PDU1(iob->data), &s2, 2);
3676 memcpy(QETH_IPA_PDU_LEN_PDU2(iob->data), &s2, 2);
3677 memcpy(QETH_IPA_PDU_LEN_PDU3(iob->data), &s2, 2);
3678 return qeth_send_control_data(card, IPA_PDU_HEADER_SIZE + len, iob,
3679 reply_cb, reply_param);
3682 static int qeth_snmp_command_cb(struct qeth_card *card,
3683 struct qeth_reply *reply, unsigned long sdata)
3685 struct qeth_ipa_cmd *cmd;
3686 struct qeth_arp_query_info *qinfo;
3687 struct qeth_snmp_cmd *snmp;
3688 unsigned char *data;
3689 __u16 data_len;
3691 QETH_DBF_TEXT(TRACE, 3, "snpcmdcb");
3693 cmd = (struct qeth_ipa_cmd *) sdata;
3694 data = (unsigned char *)((char *)cmd - reply->offset);
3695 qinfo = (struct qeth_arp_query_info *) reply->param;
3696 snmp = &cmd->data.setadapterparms.data.snmp;
3698 if (cmd->hdr.return_code) {
3699 QETH_DBF_TEXT_(TRACE, 4, "scer1%i", cmd->hdr.return_code);
3700 return 0;
3702 if (cmd->data.setadapterparms.hdr.return_code) {
3703 cmd->hdr.return_code =
3704 cmd->data.setadapterparms.hdr.return_code;
3705 QETH_DBF_TEXT_(TRACE, 4, "scer2%i", cmd->hdr.return_code);
3706 return 0;
3708 data_len = *((__u16 *)QETH_IPA_PDU_LEN_PDU1(data));
3709 if (cmd->data.setadapterparms.hdr.seq_no == 1)
3710 data_len -= (__u16)((char *)&snmp->data - (char *)cmd);
3711 else
3712 data_len -= (__u16)((char *)&snmp->request - (char *)cmd);
3714 /* check if there is enough room in userspace */
3715 if ((qinfo->udata_len - qinfo->udata_offset) < data_len) {
3716 QETH_DBF_TEXT_(TRACE, 4, "scer3%i", -ENOMEM);
3717 cmd->hdr.return_code = -ENOMEM;
3718 return 0;
3720 QETH_DBF_TEXT_(TRACE, 4, "snore%i",
3721 cmd->data.setadapterparms.hdr.used_total);
3722 QETH_DBF_TEXT_(TRACE, 4, "sseqn%i",
3723 cmd->data.setadapterparms.hdr.seq_no);
3724 /*copy entries to user buffer*/
3725 if (cmd->data.setadapterparms.hdr.seq_no == 1) {
3726 memcpy(qinfo->udata + qinfo->udata_offset,
3727 (char *)snmp,
3728 data_len + offsetof(struct qeth_snmp_cmd, data));
3729 qinfo->udata_offset += offsetof(struct qeth_snmp_cmd, data);
3730 } else {
3731 memcpy(qinfo->udata + qinfo->udata_offset,
3732 (char *)&snmp->request, data_len);
3734 qinfo->udata_offset += data_len;
3735 /* check if all replies received ... */
3736 QETH_DBF_TEXT_(TRACE, 4, "srtot%i",
3737 cmd->data.setadapterparms.hdr.used_total);
3738 QETH_DBF_TEXT_(TRACE, 4, "srseq%i",
3739 cmd->data.setadapterparms.hdr.seq_no);
3740 if (cmd->data.setadapterparms.hdr.seq_no <
3741 cmd->data.setadapterparms.hdr.used_total)
3742 return 1;
3743 return 0;
3746 int qeth_snmp_command(struct qeth_card *card, char __user *udata)
3748 struct qeth_cmd_buffer *iob;
3749 struct qeth_ipa_cmd *cmd;
3750 struct qeth_snmp_ureq *ureq;
3751 int req_len;
3752 struct qeth_arp_query_info qinfo = {0, };
3753 int rc = 0;
3755 QETH_DBF_TEXT(TRACE, 3, "snmpcmd");
3757 if (card->info.guestlan)
3758 return -EOPNOTSUPP;
3760 if ((!qeth_adp_supported(card, IPA_SETADP_SET_SNMP_CONTROL)) &&
3761 (!card->options.layer2)) {
3762 return -EOPNOTSUPP;
3764 /* skip 4 bytes (data_len struct member) to get req_len */
3765 if (copy_from_user(&req_len, udata + sizeof(int), sizeof(int)))
3766 return -EFAULT;
3767 ureq = kmalloc(req_len+sizeof(struct qeth_snmp_ureq_hdr), GFP_KERNEL);
3768 if (!ureq) {
3769 QETH_DBF_TEXT(TRACE, 2, "snmpnome");
3770 return -ENOMEM;
3772 if (copy_from_user(ureq, udata,
3773 req_len + sizeof(struct qeth_snmp_ureq_hdr))) {
3774 kfree(ureq);
3775 return -EFAULT;
3777 qinfo.udata_len = ureq->hdr.data_len;
3778 qinfo.udata = kzalloc(qinfo.udata_len, GFP_KERNEL);
3779 if (!qinfo.udata) {
3780 kfree(ureq);
3781 return -ENOMEM;
3783 qinfo.udata_offset = sizeof(struct qeth_snmp_ureq_hdr);
3785 iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_SNMP_CONTROL,
3786 QETH_SNMP_SETADP_CMDLENGTH + req_len);
3787 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
3788 memcpy(&cmd->data.setadapterparms.data.snmp, &ureq->cmd, req_len);
3789 rc = qeth_send_ipa_snmp_cmd(card, iob, QETH_SETADP_BASE_LEN + req_len,
3790 qeth_snmp_command_cb, (void *)&qinfo);
3791 if (rc)
3792 QETH_DBF_MESSAGE(2, "SNMP command failed on %s: (0x%x)\n",
3793 QETH_CARD_IFNAME(card), rc);
3794 else {
3795 if (copy_to_user(udata, qinfo.udata, qinfo.udata_len))
3796 rc = -EFAULT;
3799 kfree(ureq);
3800 kfree(qinfo.udata);
3801 return rc;
3803 EXPORT_SYMBOL_GPL(qeth_snmp_command);
3805 static inline int qeth_get_qdio_q_format(struct qeth_card *card)
3807 switch (card->info.type) {
3808 case QETH_CARD_TYPE_IQD:
3809 return 2;
3810 default:
3811 return 0;
3815 static int qeth_qdio_establish(struct qeth_card *card)
3817 struct qdio_initialize init_data;
3818 char *qib_param_field;
3819 struct qdio_buffer **in_sbal_ptrs;
3820 struct qdio_buffer **out_sbal_ptrs;
3821 int i, j, k;
3822 int rc = 0;
3824 QETH_DBF_TEXT(SETUP, 2, "qdioest");
3826 qib_param_field = kzalloc(QDIO_MAX_BUFFERS_PER_Q * sizeof(char),
3827 GFP_KERNEL);
3828 if (!qib_param_field)
3829 return -ENOMEM;
3831 qeth_create_qib_param_field(card, qib_param_field);
3832 qeth_create_qib_param_field_blkt(card, qib_param_field);
3834 in_sbal_ptrs = kmalloc(QDIO_MAX_BUFFERS_PER_Q * sizeof(void *),
3835 GFP_KERNEL);
3836 if (!in_sbal_ptrs) {
3837 kfree(qib_param_field);
3838 return -ENOMEM;
3840 for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; ++i)
3841 in_sbal_ptrs[i] = (struct qdio_buffer *)
3842 virt_to_phys(card->qdio.in_q->bufs[i].buffer);
3844 out_sbal_ptrs =
3845 kmalloc(card->qdio.no_out_queues * QDIO_MAX_BUFFERS_PER_Q *
3846 sizeof(void *), GFP_KERNEL);
3847 if (!out_sbal_ptrs) {
3848 kfree(in_sbal_ptrs);
3849 kfree(qib_param_field);
3850 return -ENOMEM;
3852 for (i = 0, k = 0; i < card->qdio.no_out_queues; ++i)
3853 for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j, ++k) {
3854 out_sbal_ptrs[k] = (struct qdio_buffer *)virt_to_phys(
3855 card->qdio.out_qs[i]->bufs[j].buffer);
3858 memset(&init_data, 0, sizeof(struct qdio_initialize));
3859 init_data.cdev = CARD_DDEV(card);
3860 init_data.q_format = qeth_get_qdio_q_format(card);
3861 init_data.qib_param_field_format = 0;
3862 init_data.qib_param_field = qib_param_field;
3863 init_data.no_input_qs = 1;
3864 init_data.no_output_qs = card->qdio.no_out_queues;
3865 init_data.input_handler = card->discipline.input_handler;
3866 init_data.output_handler = card->discipline.output_handler;
3867 init_data.int_parm = (unsigned long) card;
3868 init_data.input_sbal_addr_array = (void **) in_sbal_ptrs;
3869 init_data.output_sbal_addr_array = (void **) out_sbal_ptrs;
3871 if (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_ALLOCATED,
3872 QETH_QDIO_ESTABLISHED) == QETH_QDIO_ALLOCATED) {
3873 rc = qdio_allocate(&init_data);
3874 if (rc) {
3875 atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED);
3876 goto out;
3878 rc = qdio_establish(&init_data);
3879 if (rc) {
3880 atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED);
3881 qdio_free(CARD_DDEV(card));
3884 out:
3885 kfree(out_sbal_ptrs);
3886 kfree(in_sbal_ptrs);
3887 kfree(qib_param_field);
3888 return rc;
3891 static void qeth_core_free_card(struct qeth_card *card)
3894 QETH_DBF_TEXT(SETUP, 2, "freecrd");
3895 QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));
3896 qeth_clean_channel(&card->read);
3897 qeth_clean_channel(&card->write);
3898 if (card->dev)
3899 free_netdev(card->dev);
3900 kfree(card->ip_tbd_list);
3901 qeth_free_qdio_buffers(card);
3902 unregister_service_level(&card->qeth_service_level);
3903 kfree(card);
3906 static struct ccw_device_id qeth_ids[] = {
3907 {CCW_DEVICE_DEVTYPE(0x1731, 0x01, 0x1732, 0x01),
3908 .driver_info = QETH_CARD_TYPE_OSD},
3909 {CCW_DEVICE_DEVTYPE(0x1731, 0x05, 0x1732, 0x05),
3910 .driver_info = QETH_CARD_TYPE_IQD},
3911 {CCW_DEVICE_DEVTYPE(0x1731, 0x06, 0x1732, 0x06),
3912 .driver_info = QETH_CARD_TYPE_OSN},
3913 {CCW_DEVICE_DEVTYPE(0x1731, 0x02, 0x1732, 0x03),
3914 .driver_info = QETH_CARD_TYPE_OSM},
3915 {CCW_DEVICE_DEVTYPE(0x1731, 0x02, 0x1732, 0x02),
3916 .driver_info = QETH_CARD_TYPE_OSX},
3919 MODULE_DEVICE_TABLE(ccw, qeth_ids);
3921 static struct ccw_driver qeth_ccw_driver = {
3922 .name = "qeth",
3923 .ids = qeth_ids,
3924 .probe = ccwgroup_probe_ccwdev,
3925 .remove = ccwgroup_remove_ccwdev,
3928 static int qeth_core_driver_group(const char *buf, struct device *root_dev,
3929 unsigned long driver_id)
3931 return ccwgroup_create_from_string(root_dev, driver_id,
3932 &qeth_ccw_driver, 3, buf);
3935 int qeth_core_hardsetup_card(struct qeth_card *card)
3937 int retries = 0;
3938 int rc;
3940 QETH_DBF_TEXT(SETUP, 2, "hrdsetup");
3941 atomic_set(&card->force_alloc_skb, 0);
3942 retry:
3943 if (retries)
3944 QETH_DBF_MESSAGE(2, "%s Retrying to do IDX activates.\n",
3945 dev_name(&card->gdev->dev));
3946 ccw_device_set_offline(CARD_DDEV(card));
3947 ccw_device_set_offline(CARD_WDEV(card));
3948 ccw_device_set_offline(CARD_RDEV(card));
3949 rc = ccw_device_set_online(CARD_RDEV(card));
3950 if (rc)
3951 goto retriable;
3952 rc = ccw_device_set_online(CARD_WDEV(card));
3953 if (rc)
3954 goto retriable;
3955 rc = ccw_device_set_online(CARD_DDEV(card));
3956 if (rc)
3957 goto retriable;
3958 rc = qeth_qdio_clear_card(card, card->info.type != QETH_CARD_TYPE_IQD);
3959 retriable:
3960 if (rc == -ERESTARTSYS) {
3961 QETH_DBF_TEXT(SETUP, 2, "break1");
3962 return rc;
3963 } else if (rc) {
3964 QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
3965 if (++retries > 3)
3966 goto out;
3967 else
3968 goto retry;
3970 qeth_init_tokens(card);
3971 qeth_init_func_level(card);
3972 rc = qeth_idx_activate_channel(&card->read, qeth_idx_read_cb);
3973 if (rc == -ERESTARTSYS) {
3974 QETH_DBF_TEXT(SETUP, 2, "break2");
3975 return rc;
3976 } else if (rc) {
3977 QETH_DBF_TEXT_(SETUP, 2, "3err%d", rc);
3978 if (--retries < 0)
3979 goto out;
3980 else
3981 goto retry;
3983 rc = qeth_idx_activate_channel(&card->write, qeth_idx_write_cb);
3984 if (rc == -ERESTARTSYS) {
3985 QETH_DBF_TEXT(SETUP, 2, "break3");
3986 return rc;
3987 } else if (rc) {
3988 QETH_DBF_TEXT_(SETUP, 2, "4err%d", rc);
3989 if (--retries < 0)
3990 goto out;
3991 else
3992 goto retry;
3994 rc = qeth_mpc_initialize(card);
3995 if (rc) {
3996 QETH_DBF_TEXT_(SETUP, 2, "5err%d", rc);
3997 goto out;
3999 return 0;
4000 out:
4001 dev_warn(&card->gdev->dev, "The qeth device driver failed to recover "
4002 "an error on the device\n");
4003 QETH_DBF_MESSAGE(2, "%s Initialization in hardsetup failed! rc=%d\n",
4004 dev_name(&card->gdev->dev), rc);
4005 return rc;
4007 EXPORT_SYMBOL_GPL(qeth_core_hardsetup_card);
4009 static inline int qeth_create_skb_frag(struct qdio_buffer_element *element,
4010 struct sk_buff **pskb, int offset, int *pfrag, int data_len)
4012 struct page *page = virt_to_page(element->addr);
4013 if (*pskb == NULL) {
4014 /* the upper protocol layers assume that there is data in the
4015 * skb itself. Copy a small amount (64 bytes) to make them
4016 * happy. */
4017 *pskb = dev_alloc_skb(64 + ETH_HLEN);
4018 if (!(*pskb))
4019 return -ENOMEM;
4020 skb_reserve(*pskb, ETH_HLEN);
4021 if (data_len <= 64) {
4022 memcpy(skb_put(*pskb, data_len), element->addr + offset,
4023 data_len);
4024 } else {
4025 get_page(page);
4026 memcpy(skb_put(*pskb, 64), element->addr + offset, 64);
4027 skb_fill_page_desc(*pskb, *pfrag, page, offset + 64,
4028 data_len - 64);
4029 (*pskb)->data_len += data_len - 64;
4030 (*pskb)->len += data_len - 64;
4031 (*pskb)->truesize += data_len - 64;
4032 (*pfrag)++;
4034 } else {
4035 get_page(page);
4036 skb_fill_page_desc(*pskb, *pfrag, page, offset, data_len);
4037 (*pskb)->data_len += data_len;
4038 (*pskb)->len += data_len;
4039 (*pskb)->truesize += data_len;
4040 (*pfrag)++;
4042 return 0;
4045 struct sk_buff *qeth_core_get_next_skb(struct qeth_card *card,
4046 struct qdio_buffer *buffer,
4047 struct qdio_buffer_element **__element, int *__offset,
4048 struct qeth_hdr **hdr)
4050 struct qdio_buffer_element *element = *__element;
4051 int offset = *__offset;
4052 struct sk_buff *skb = NULL;
4053 int skb_len = 0;
4054 void *data_ptr;
4055 int data_len;
4056 int headroom = 0;
4057 int use_rx_sg = 0;
4058 int frag = 0;
4060 /* qeth_hdr must not cross element boundaries */
4061 if (element->length < offset + sizeof(struct qeth_hdr)) {
4062 if (qeth_is_last_sbale(element))
4063 return NULL;
4064 element++;
4065 offset = 0;
4066 if (element->length < sizeof(struct qeth_hdr))
4067 return NULL;
4069 *hdr = element->addr + offset;
4071 offset += sizeof(struct qeth_hdr);
4072 switch ((*hdr)->hdr.l2.id) {
4073 case QETH_HEADER_TYPE_LAYER2:
4074 skb_len = (*hdr)->hdr.l2.pkt_length;
4075 break;
4076 case QETH_HEADER_TYPE_LAYER3:
4077 skb_len = (*hdr)->hdr.l3.length;
4078 if ((card->info.link_type == QETH_LINK_TYPE_LANE_TR) ||
4079 (card->info.link_type == QETH_LINK_TYPE_HSTR))
4080 headroom = TR_HLEN;
4081 else
4082 headroom = ETH_HLEN;
4083 break;
4084 case QETH_HEADER_TYPE_OSN:
4085 skb_len = (*hdr)->hdr.osn.pdu_length;
4086 headroom = sizeof(struct qeth_hdr);
4087 break;
4088 default:
4089 break;
4092 if (!skb_len)
4093 return NULL;
4095 if ((skb_len >= card->options.rx_sg_cb) &&
4096 (!(card->info.type == QETH_CARD_TYPE_OSN)) &&
4097 (!atomic_read(&card->force_alloc_skb))) {
4098 use_rx_sg = 1;
4099 } else {
4100 skb = dev_alloc_skb(skb_len + headroom);
4101 if (!skb)
4102 goto no_mem;
4103 if (headroom)
4104 skb_reserve(skb, headroom);
4107 data_ptr = element->addr + offset;
4108 while (skb_len) {
4109 data_len = min(skb_len, (int)(element->length - offset));
4110 if (data_len) {
4111 if (use_rx_sg) {
4112 if (qeth_create_skb_frag(element, &skb, offset,
4113 &frag, data_len))
4114 goto no_mem;
4115 } else {
4116 memcpy(skb_put(skb, data_len), data_ptr,
4117 data_len);
4120 skb_len -= data_len;
4121 if (skb_len) {
4122 if (qeth_is_last_sbale(element)) {
4123 QETH_DBF_TEXT(TRACE, 4, "unexeob");
4124 QETH_DBF_TEXT_(TRACE, 4, "%s",
4125 CARD_BUS_ID(card));
4126 QETH_DBF_TEXT(QERR, 2, "unexeob");
4127 QETH_DBF_TEXT_(QERR, 2, "%s",
4128 CARD_BUS_ID(card));
4129 QETH_DBF_HEX(MISC, 4, buffer, sizeof(*buffer));
4130 dev_kfree_skb_any(skb);
4131 card->stats.rx_errors++;
4132 return NULL;
4134 element++;
4135 offset = 0;
4136 data_ptr = element->addr;
4137 } else {
4138 offset += data_len;
4141 *__element = element;
4142 *__offset = offset;
4143 if (use_rx_sg && card->options.performance_stats) {
4144 card->perf_stats.sg_skbs_rx++;
4145 card->perf_stats.sg_frags_rx += skb_shinfo(skb)->nr_frags;
4147 return skb;
4148 no_mem:
4149 if (net_ratelimit()) {
4150 QETH_DBF_TEXT(TRACE, 2, "noskbmem");
4151 QETH_DBF_TEXT_(TRACE, 2, "%s", CARD_BUS_ID(card));
4153 card->stats.rx_dropped++;
4154 return NULL;
4156 EXPORT_SYMBOL_GPL(qeth_core_get_next_skb);
4158 static void qeth_unregister_dbf_views(void)
4160 int x;
4161 for (x = 0; x < QETH_DBF_INFOS; x++) {
4162 debug_unregister(qeth_dbf[x].id);
4163 qeth_dbf[x].id = NULL;
4167 void qeth_dbf_longtext(enum qeth_dbf_names dbf_nix, int level, char *fmt, ...)
4169 char dbf_txt_buf[32];
4170 va_list args;
4172 if (level > (qeth_dbf[dbf_nix].id)->level)
4173 return;
4174 va_start(args, fmt);
4175 vsnprintf(dbf_txt_buf, sizeof(dbf_txt_buf), fmt, args);
4176 va_end(args);
4177 debug_text_event(qeth_dbf[dbf_nix].id, level, dbf_txt_buf);
4179 EXPORT_SYMBOL_GPL(qeth_dbf_longtext);
4181 static int qeth_register_dbf_views(void)
4183 int ret;
4184 int x;
4186 for (x = 0; x < QETH_DBF_INFOS; x++) {
4187 /* register the areas */
4188 qeth_dbf[x].id = debug_register(qeth_dbf[x].name,
4189 qeth_dbf[x].pages,
4190 qeth_dbf[x].areas,
4191 qeth_dbf[x].len);
4192 if (qeth_dbf[x].id == NULL) {
4193 qeth_unregister_dbf_views();
4194 return -ENOMEM;
4197 /* register a view */
4198 ret = debug_register_view(qeth_dbf[x].id, qeth_dbf[x].view);
4199 if (ret) {
4200 qeth_unregister_dbf_views();
4201 return ret;
4204 /* set a passing level */
4205 debug_set_level(qeth_dbf[x].id, qeth_dbf[x].level);
4208 return 0;
4211 int qeth_core_load_discipline(struct qeth_card *card,
4212 enum qeth_discipline_id discipline)
4214 int rc = 0;
4215 switch (discipline) {
4216 case QETH_DISCIPLINE_LAYER3:
4217 card->discipline.ccwgdriver = try_then_request_module(
4218 symbol_get(qeth_l3_ccwgroup_driver),
4219 "qeth_l3");
4220 break;
4221 case QETH_DISCIPLINE_LAYER2:
4222 card->discipline.ccwgdriver = try_then_request_module(
4223 symbol_get(qeth_l2_ccwgroup_driver),
4224 "qeth_l2");
4225 break;
4227 if (!card->discipline.ccwgdriver) {
4228 dev_err(&card->gdev->dev, "There is no kernel module to "
4229 "support discipline %d\n", discipline);
4230 rc = -EINVAL;
4232 return rc;
4235 void qeth_core_free_discipline(struct qeth_card *card)
4237 if (card->options.layer2)
4238 symbol_put(qeth_l2_ccwgroup_driver);
4239 else
4240 symbol_put(qeth_l3_ccwgroup_driver);
4241 card->discipline.ccwgdriver = NULL;
4244 static void qeth_determine_capabilities(struct qeth_card *card)
4246 int rc;
4247 int length;
4248 char *prcd;
4250 QETH_DBF_TEXT(SETUP, 2, "detcapab");
4251 rc = ccw_device_set_online(CARD_DDEV(card));
4252 if (rc) {
4253 QETH_DBF_TEXT_(SETUP, 2, "3err%d", rc);
4254 goto out;
4258 rc = qeth_read_conf_data(card, (void **) &prcd, &length);
4259 if (rc) {
4260 QETH_DBF_MESSAGE(2, "%s qeth_read_conf_data returned %i\n",
4261 dev_name(&card->gdev->dev), rc);
4262 QETH_DBF_TEXT_(SETUP, 2, "5err%d", rc);
4263 goto out_offline;
4265 qeth_configure_unitaddr(card, prcd);
4266 qeth_configure_blkt_default(card, prcd);
4267 kfree(prcd);
4269 rc = qdio_get_ssqd_desc(CARD_DDEV(card), &card->ssqd);
4270 if (rc)
4271 QETH_DBF_TEXT_(SETUP, 2, "6err%d", rc);
4273 out_offline:
4274 ccw_device_set_offline(CARD_DDEV(card));
4275 out:
4276 return;
/*
 * qeth_core_probe_device() - ccwgroup probe callback.
 * @gdev: the grouped ccw device (read/write/data channel triple).
 *
 * Allocates and initializes a qeth_card for the device group, creates
 * the sysfs attributes, binds the layer-2 discipline for OSN/OSM cards
 * (other card types defer discipline selection to set_online), links
 * the card into the global card list and probes its capabilities.
 *
 * Holds a reference on the group device (get_device) for the card's
 * lifetime; released in qeth_core_remove_device() or on the error path.
 * Returns 0 on success or a negative errno.
 */
static int qeth_core_probe_device(struct ccwgroup_device *gdev)
{
	struct qeth_card *card;
	struct device *dev;
	int rc;
	unsigned long flags;

	QETH_DBF_TEXT(SETUP, 2, "probedev");

	dev = &gdev->dev;
	if (!get_device(dev))
		return -ENODEV;

	QETH_DBF_TEXT_(SETUP, 2, "%s", dev_name(&gdev->dev));

	card = qeth_alloc_card();
	if (!card) {
		QETH_DBF_TEXT_(SETUP, 2, "1err%d", -ENOMEM);
		rc = -ENOMEM;
		goto err_dev;
	}
	/* Channel assignment: cdev[0]=read, cdev[1]=write, cdev[2]=data;
	 * all three share the common qeth interrupt handler. */
	card->read.ccwdev = gdev->cdev[0];
	card->write.ccwdev = gdev->cdev[1];
	card->data.ccwdev = gdev->cdev[2];
	dev_set_drvdata(&gdev->dev, card);
	card->gdev = gdev;
	gdev->cdev[0]->handler = qeth_irq;
	gdev->cdev[1]->handler = qeth_irq;
	gdev->cdev[2]->handler = qeth_irq;

	rc = qeth_determine_card_type(card);
	if (rc) {
		QETH_DBF_TEXT_(SETUP, 2, "3err%d", rc);
		goto err_card;
	}
	rc = qeth_setup_card(card);
	if (rc) {
		QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc);
		goto err_card;
	}

	/* OSN cards get a reduced attribute set. */
	if (card->info.type == QETH_CARD_TYPE_OSN)
		rc = qeth_core_create_osn_attributes(dev);
	else
		rc = qeth_core_create_device_attributes(dev);
	if (rc)
		goto err_card;
	switch (card->info.type) {
	case QETH_CARD_TYPE_OSN:
	case QETH_CARD_TYPE_OSM:
		/* These types are layer-2 only: bind the discipline now. */
		rc = qeth_core_load_discipline(card, QETH_DISCIPLINE_LAYER2);
		if (rc)
			goto err_attr;
		rc = card->discipline.ccwgdriver->probe(card->gdev);
		if (rc)
			goto err_disc;
		/* fall through - on success, continues to the common break */
	case QETH_CARD_TYPE_OSD:
	case QETH_CARD_TYPE_OSX:
	default:
		break;
	}

	/* Publish the card on the global list under the writer lock. */
	write_lock_irqsave(&qeth_core_card_list.rwlock, flags);
	list_add_tail(&card->list, &qeth_core_card_list.list);
	write_unlock_irqrestore(&qeth_core_card_list.rwlock, flags);

	qeth_determine_capabilities(card);
	return 0;

err_disc:
	qeth_core_free_discipline(card);
err_attr:
	if (card->info.type == QETH_CARD_TYPE_OSN)
		qeth_core_remove_osn_attributes(dev);
	else
		qeth_core_remove_device_attributes(dev);
err_card:
	qeth_core_free_card(card);
err_dev:
	put_device(dev);
	return rc;
}
/*
 * qeth_core_remove_device() - ccwgroup remove callback.
 * @gdev: the group device being removed.
 *
 * Tears down everything qeth_core_probe_device() set up, in reverse:
 * discipline, sysfs attributes, global-list entry, the card itself,
 * and finally the device reference taken at probe time.
 */
static void qeth_core_remove_device(struct ccwgroup_device *gdev)
{
	unsigned long flags;
	struct qeth_card *card = dev_get_drvdata(&gdev->dev);

	QETH_DBF_TEXT(SETUP, 2, "removedv");
	/* A discipline is only bound for OSN/OSM cards or after a
	 * set_online; unbind it first so its remove() runs while the
	 * card is still fully valid. */
	if (card->discipline.ccwgdriver) {
		card->discipline.ccwgdriver->remove(gdev);
		qeth_core_free_discipline(card);
	}

	if (card->info.type == QETH_CARD_TYPE_OSN) {
		qeth_core_remove_osn_attributes(&gdev->dev);
	} else {
		qeth_core_remove_device_attributes(&gdev->dev);
	}
	/* Unlink from the global card list under the writer lock. */
	write_lock_irqsave(&qeth_core_card_list.rwlock, flags);
	list_del(&card->list);
	write_unlock_irqrestore(&qeth_core_card_list.rwlock, flags);
	qeth_core_free_card(card);
	dev_set_drvdata(&gdev->dev, NULL);
	/* Drop the reference taken by get_device() in probe. */
	put_device(&gdev->dev);
	return;
}
/*
 * qeth_core_set_online() - ccwgroup set_online callback.
 * @gdev: the group device to bring online.
 *
 * Lazily binds a discipline if none is attached yet: IQD cards default
 * to the layer-3 discipline, all other types to layer-2.  Then forwards
 * the online request to the discipline's set_online().  Returns 0 on
 * success or a negative errno from discipline load/probe/set_online.
 */
static int qeth_core_set_online(struct ccwgroup_device *gdev)
{
	struct qeth_card *card = dev_get_drvdata(&gdev->dev);
	int rc = 0;
	int def_discipline;

	if (!card->discipline.ccwgdriver) {
		if (card->info.type == QETH_CARD_TYPE_IQD)
			def_discipline = QETH_DISCIPLINE_LAYER3;
		else
			def_discipline = QETH_DISCIPLINE_LAYER2;
		rc = qeth_core_load_discipline(card, def_discipline);
		if (rc)
			goto err;
		/* Run the discipline's probe now that it is bound. */
		rc = card->discipline.ccwgdriver->probe(card->gdev);
		if (rc)
			goto err;
	}
	rc = card->discipline.ccwgdriver->set_online(gdev);
err:
	return rc;
}
4410 static int qeth_core_set_offline(struct ccwgroup_device *gdev)
4412 struct qeth_card *card = dev_get_drvdata(&gdev->dev);
4413 return card->discipline.ccwgdriver->set_offline(gdev);
4416 static void qeth_core_shutdown(struct ccwgroup_device *gdev)
4418 struct qeth_card *card = dev_get_drvdata(&gdev->dev);
4419 if (card->discipline.ccwgdriver &&
4420 card->discipline.ccwgdriver->shutdown)
4421 card->discipline.ccwgdriver->shutdown(gdev);
4424 static int qeth_core_prepare(struct ccwgroup_device *gdev)
4426 struct qeth_card *card = dev_get_drvdata(&gdev->dev);
4427 if (card->discipline.ccwgdriver &&
4428 card->discipline.ccwgdriver->prepare)
4429 return card->discipline.ccwgdriver->prepare(gdev);
4430 return 0;
4433 static void qeth_core_complete(struct ccwgroup_device *gdev)
4435 struct qeth_card *card = dev_get_drvdata(&gdev->dev);
4436 if (card->discipline.ccwgdriver &&
4437 card->discipline.ccwgdriver->complete)
4438 card->discipline.ccwgdriver->complete(gdev);
4441 static int qeth_core_freeze(struct ccwgroup_device *gdev)
4443 struct qeth_card *card = dev_get_drvdata(&gdev->dev);
4444 if (card->discipline.ccwgdriver &&
4445 card->discipline.ccwgdriver->freeze)
4446 return card->discipline.ccwgdriver->freeze(gdev);
4447 return 0;
4450 static int qeth_core_thaw(struct ccwgroup_device *gdev)
4452 struct qeth_card *card = dev_get_drvdata(&gdev->dev);
4453 if (card->discipline.ccwgdriver &&
4454 card->discipline.ccwgdriver->thaw)
4455 return card->discipline.ccwgdriver->thaw(gdev);
4456 return 0;
4459 static int qeth_core_restore(struct ccwgroup_device *gdev)
4461 struct qeth_card *card = dev_get_drvdata(&gdev->dev);
4462 if (card->discipline.ccwgdriver &&
4463 card->discipline.ccwgdriver->restore)
4464 return card->discipline.ccwgdriver->restore(gdev);
4465 return 0;
/* The core ccwgroup driver: dispatches all group-device lifecycle and
 * power-management events to the callbacks above. */
static struct ccwgroup_driver qeth_core_ccwgroup_driver = {
	.owner = THIS_MODULE,
	.name = "qeth",
	.driver_id = 0xD8C5E3C8,	/* "QETH" in EBCDIC */
	.probe = qeth_core_probe_device,
	.remove = qeth_core_remove_device,
	.set_online = qeth_core_set_online,
	.set_offline = qeth_core_set_offline,
	.shutdown = qeth_core_shutdown,
	.prepare = qeth_core_prepare,
	.complete = qeth_core_complete,
	.freeze = qeth_core_freeze,
	.thaw = qeth_core_thaw,
	.restore = qeth_core_restore,
};
4484 static ssize_t
4485 qeth_core_driver_group_store(struct device_driver *ddrv, const char *buf,
4486 size_t count)
4488 int err;
4489 err = qeth_core_driver_group(buf, qeth_core_root_dev,
4490 qeth_core_ccwgroup_driver.driver_id);
4491 if (err)
4492 return err;
4493 else
4494 return count;
4497 static DRIVER_ATTR(group, 0200, NULL, qeth_core_driver_group_store);
/* Names for the ethtool statistics, one fixed-width entry per counter.
 * NOTE: the order here is the ABI for qeth_core_get_ethtool_stats() --
 * entry i names the value written to data[i] there; keep both in sync.
 */
static struct {
	const char str[ETH_GSTRING_LEN];
} qeth_ethtool_stats_keys[] = {
/* 0 */{"rx skbs"},
	{"rx buffers"},
	{"tx skbs"},
	{"tx buffers"},
	{"tx skbs no packing"},
	{"tx buffers no packing"},
	{"tx skbs packing"},
	{"tx buffers packing"},
	{"tx sg skbs"},
	{"tx sg frags"},
/* 10 */{"rx sg skbs"},
	{"rx sg frags"},
	{"rx sg page allocs"},
	{"tx large kbytes"},
	{"tx large count"},
	{"tx pk state ch n->p"},
	{"tx pk state ch p->n"},
	{"tx pk watermark low"},
	{"tx pk watermark high"},
	{"queue 0 buffer usage"},
/* 20 */{"queue 1 buffer usage"},
	{"queue 2 buffer usage"},
	{"queue 3 buffer usage"},
	{"rx handler time"},
	{"rx handler count"},
	{"rx do_QDIO time"},
	{"rx do_QDIO count"},
	{"tx handler time"},
	{"tx handler count"},
	{"tx time"},
/* 30 */{"tx count"},
	{"tx do_QDIO time"},
	{"tx do_QDIO count"},
	{"tx csum"},
	{"tx lin"},
};
4539 int qeth_core_get_sset_count(struct net_device *dev, int stringset)
4541 switch (stringset) {
4542 case ETH_SS_STATS:
4543 return (sizeof(qeth_ethtool_stats_keys) / ETH_GSTRING_LEN);
4544 default:
4545 return -EINVAL;
4548 EXPORT_SYMBOL_GPL(qeth_core_get_sset_count);
/*
 * qeth_core_get_ethtool_stats() - ethtool get_ethtool_stats callback.
 *
 * Fills data[] with the card's performance counters.  The index of
 * each slot must match the same-index entry in qeth_ethtool_stats_keys
 * above; change both together.
 */
void qeth_core_get_ethtool_stats(struct net_device *dev,
		struct ethtool_stats *stats, u64 *data)
{
	struct qeth_card *card = dev->ml_priv;

	/* rx/tx skb counts are reported relative to the snapshot taken
	 * when perf stats were enabled (initial_*_packets). */
	data[0] = card->stats.rx_packets -
				card->perf_stats.initial_rx_packets;
	data[1] = card->perf_stats.bufs_rec;
	data[2] = card->stats.tx_packets -
				card->perf_stats.initial_tx_packets;
	data[3] = card->perf_stats.bufs_sent;
	/* "no packing" = total sent minus the packed portion. */
	data[4] = card->stats.tx_packets - card->perf_stats.initial_tx_packets
			- card->perf_stats.skbs_sent_pack;
	data[5] = card->perf_stats.bufs_sent - card->perf_stats.bufs_sent_pack;
	data[6] = card->perf_stats.skbs_sent_pack;
	data[7] = card->perf_stats.bufs_sent_pack;
	data[8] = card->perf_stats.sg_skbs_sent;
	data[9] = card->perf_stats.sg_frags_sent;
	data[10] = card->perf_stats.sg_skbs_rx;
	data[11] = card->perf_stats.sg_frags_rx;
	data[12] = card->perf_stats.sg_alloc_page_rx;
	data[13] = (card->perf_stats.large_send_bytes >> 10); /* in KiB */
	data[14] = card->perf_stats.large_send_cnt;
	data[15] = card->perf_stats.sc_dp_p;
	data[16] = card->perf_stats.sc_p_dp;
	data[17] = QETH_LOW_WATERMARK_PACK;
	data[18] = QETH_HIGH_WATERMARK_PACK;
	/* Per-queue buffer usage; queues beyond no_out_queues read as 0. */
	data[19] = atomic_read(&card->qdio.out_qs[0]->used_buffers);
	data[20] = (card->qdio.no_out_queues > 1) ?
			atomic_read(&card->qdio.out_qs[1]->used_buffers) : 0;
	data[21] = (card->qdio.no_out_queues > 2) ?
			atomic_read(&card->qdio.out_qs[2]->used_buffers) : 0;
	data[22] = (card->qdio.no_out_queues > 3) ?
			atomic_read(&card->qdio.out_qs[3]->used_buffers) : 0;
	data[23] = card->perf_stats.inbound_time;
	data[24] = card->perf_stats.inbound_cnt;
	data[25] = card->perf_stats.inbound_do_qdio_time;
	data[26] = card->perf_stats.inbound_do_qdio_cnt;
	data[27] = card->perf_stats.outbound_handler_time;
	data[28] = card->perf_stats.outbound_handler_cnt;
	data[29] = card->perf_stats.outbound_time;
	data[30] = card->perf_stats.outbound_cnt;
	data[31] = card->perf_stats.outbound_do_qdio_time;
	data[32] = card->perf_stats.outbound_do_qdio_cnt;
	data[33] = card->perf_stats.tx_csum;
	data[34] = card->perf_stats.tx_lin;
}
EXPORT_SYMBOL_GPL(qeth_core_get_ethtool_stats);
4598 void qeth_core_get_strings(struct net_device *dev, u32 stringset, u8 *data)
4600 switch (stringset) {
4601 case ETH_SS_STATS:
4602 memcpy(data, &qeth_ethtool_stats_keys,
4603 sizeof(qeth_ethtool_stats_keys));
4604 break;
4605 default:
4606 WARN_ON(1);
4607 break;
4610 EXPORT_SYMBOL_GPL(qeth_core_get_strings);
4612 void qeth_core_get_drvinfo(struct net_device *dev,
4613 struct ethtool_drvinfo *info)
4615 struct qeth_card *card = dev->ml_priv;
4616 if (card->options.layer2)
4617 strcpy(info->driver, "qeth_l2");
4618 else
4619 strcpy(info->driver, "qeth_l3");
4621 strcpy(info->version, "1.0");
4622 strcpy(info->fw_version, card->info.mcl_level);
4623 sprintf(info->bus_info, "%s/%s/%s",
4624 CARD_RDEV_ID(card),
4625 CARD_WDEV_ID(card),
4626 CARD_DDEV_ID(card));
4628 EXPORT_SYMBOL_GPL(qeth_core_get_drvinfo);
/*
 * qeth_core_ethtool_get_settings() - ethtool get_settings callback.
 *
 * Derives advertised/supported modes, speed and port type from the
 * card's link type.  IQD cards and guest-LAN devices are always
 * reported as 10G; everything is reported full-duplex with autoneg
 * enabled and an internal transceiver.  Always returns 0.
 */
int qeth_core_ethtool_get_settings(struct net_device *netdev,
				struct ethtool_cmd *ecmd)
{
	struct qeth_card *card = netdev->ml_priv;
	enum qeth_link_types link_type;

	if ((card->info.type == QETH_CARD_TYPE_IQD) || (card->info.guestlan))
		link_type = QETH_LINK_TYPE_10GBIT_ETH;
	else
		link_type = card->info.link_type;

	/* Common settings for all link types. */
	ecmd->transceiver = XCVR_INTERNAL;
	ecmd->supported = SUPPORTED_Autoneg;
	ecmd->advertising = ADVERTISED_Autoneg;
	ecmd->duplex = DUPLEX_FULL;
	ecmd->autoneg = AUTONEG_ENABLE;

	switch (link_type) {
	case QETH_LINK_TYPE_FAST_ETH:
	case QETH_LINK_TYPE_LANE_ETH100:
		/* 100 Mbit copper. */
		ecmd->supported |= SUPPORTED_10baseT_Half |
					SUPPORTED_10baseT_Full |
					SUPPORTED_100baseT_Half |
					SUPPORTED_100baseT_Full |
					SUPPORTED_TP;
		ecmd->advertising |= ADVERTISED_10baseT_Half |
					ADVERTISED_10baseT_Full |
					ADVERTISED_100baseT_Half |
					ADVERTISED_100baseT_Full |
					ADVERTISED_TP;
		ecmd->speed = SPEED_100;
		ecmd->port = PORT_TP;
		break;

	case QETH_LINK_TYPE_GBIT_ETH:
	case QETH_LINK_TYPE_LANE_ETH1000:
		/* 1 Gbit fibre. */
		ecmd->supported |= SUPPORTED_10baseT_Half |
					SUPPORTED_10baseT_Full |
					SUPPORTED_100baseT_Half |
					SUPPORTED_100baseT_Full |
					SUPPORTED_1000baseT_Half |
					SUPPORTED_1000baseT_Full |
					SUPPORTED_FIBRE;
		ecmd->advertising |= ADVERTISED_10baseT_Half |
					ADVERTISED_10baseT_Full |
					ADVERTISED_100baseT_Half |
					ADVERTISED_100baseT_Full |
					ADVERTISED_1000baseT_Half |
					ADVERTISED_1000baseT_Full |
					ADVERTISED_FIBRE;
		ecmd->speed = SPEED_1000;
		ecmd->port = PORT_FIBRE;
		break;

	case QETH_LINK_TYPE_10GBIT_ETH:
		/* 10 Gbit fibre (also IQD / guest LAN, see above). */
		ecmd->supported |= SUPPORTED_10baseT_Half |
					SUPPORTED_10baseT_Full |
					SUPPORTED_100baseT_Half |
					SUPPORTED_100baseT_Full |
					SUPPORTED_1000baseT_Half |
					SUPPORTED_1000baseT_Full |
					SUPPORTED_10000baseT_Full |
					SUPPORTED_FIBRE;
		ecmd->advertising |= ADVERTISED_10baseT_Half |
					ADVERTISED_10baseT_Full |
					ADVERTISED_100baseT_Half |
					ADVERTISED_100baseT_Full |
					ADVERTISED_1000baseT_Half |
					ADVERTISED_1000baseT_Full |
					ADVERTISED_10000baseT_Full |
					ADVERTISED_FIBRE;
		ecmd->speed = SPEED_10000;
		ecmd->port = PORT_FIBRE;
		break;

	default:
		/* Unknown link type: fall back to 10 Mbit copper. */
		ecmd->supported |= SUPPORTED_10baseT_Half |
					SUPPORTED_10baseT_Full |
					SUPPORTED_TP;
		ecmd->advertising |= ADVERTISED_10baseT_Half |
					ADVERTISED_10baseT_Full |
					ADVERTISED_TP;
		ecmd->speed = SPEED_10;
		ecmd->port = PORT_TP;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(qeth_core_ethtool_get_settings);
/*
 * qeth_core_init() - module initialization.
 *
 * Registers, in order: the dbf debug views, the ccw driver, the
 * ccwgroup driver, the driver "group" sysfs attribute, the qeth root
 * device and the qeth_hdr slab cache.  On any failure the goto ladder
 * unwinds exactly the steps that already succeeded, in reverse order.
 * Returns 0 on success or a negative errno.
 */
static int __init qeth_core_init(void)
{
	int rc;

	pr_info("loading core functions\n");
	INIT_LIST_HEAD(&qeth_core_card_list.list);
	rwlock_init(&qeth_core_card_list.rwlock);

	rc = qeth_register_dbf_views();
	if (rc)
		goto out_err;
	rc = ccw_driver_register(&qeth_ccw_driver);
	if (rc)
		goto ccw_err;
	rc = ccwgroup_driver_register(&qeth_core_ccwgroup_driver);
	if (rc)
		goto ccwgroup_err;
	rc = driver_create_file(&qeth_core_ccwgroup_driver.driver,
				&driver_attr_group);
	if (rc)
		goto driver_err;
	qeth_core_root_dev = root_device_register("qeth");
	/* root_device_register() returns ERR_PTR on failure. */
	rc = IS_ERR(qeth_core_root_dev) ? PTR_ERR(qeth_core_root_dev) : 0;
	if (rc)
		goto register_err;

	/* Slab for qeth headers; 64-byte aligned, with room for an
	 * Ethernet header behind the qeth_hdr. */
	qeth_core_header_cache = kmem_cache_create("qeth_hdr",
			sizeof(struct qeth_hdr) + ETH_HLEN, 64, 0, NULL);
	if (!qeth_core_header_cache) {
		rc = -ENOMEM;
		goto slab_err;
	}

	return 0;
slab_err:
	root_device_unregister(qeth_core_root_dev);
register_err:
	driver_remove_file(&qeth_core_ccwgroup_driver.driver,
			   &driver_attr_group);
driver_err:
	ccwgroup_driver_unregister(&qeth_core_ccwgroup_driver);
ccwgroup_err:
	ccw_driver_unregister(&qeth_ccw_driver);
ccw_err:
	QETH_DBF_MESSAGE(2, "Initialization failed with code %d\n", rc);
	qeth_unregister_dbf_views();
out_err:
	pr_err("Initializing the qeth device driver failed\n");
	return rc;
}
/*
 * qeth_core_exit() - module cleanup.
 *
 * Undoes qeth_core_init() in reverse registration order, then destroys
 * the header slab cache and the dbf views.
 */
static void __exit qeth_core_exit(void)
{
	root_device_unregister(qeth_core_root_dev);
	driver_remove_file(&qeth_core_ccwgroup_driver.driver,
			   &driver_attr_group);
	ccwgroup_driver_unregister(&qeth_core_ccwgroup_driver);
	ccw_driver_unregister(&qeth_ccw_driver);
	kmem_cache_destroy(qeth_core_header_cache);
	qeth_unregister_dbf_views();
	pr_info("core functions removed\n");
}
module_init(qeth_core_init);
module_exit(qeth_core_exit);
/* Module metadata. */
MODULE_AUTHOR("Frank Blaschka <frank.blaschka@de.ibm.com>");
MODULE_DESCRIPTION("qeth core functions");
MODULE_LICENSE("GPL");