/* release/src-rt-6.x.4708/linux/linux-2.6.36/drivers/infiniband/hw/mthca/mthca_eq.c */

/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/slab.h>

#include "mthca_dev.h"
#include "mthca_cmd.h"
#include "mthca_config_reg.h"

enum {
        MTHCA_NUM_ASYNC_EQE = 0x80,
        MTHCA_NUM_CMD_EQE   = 0x80,
        MTHCA_NUM_SPARE_EQE = 0x80,
        MTHCA_EQ_ENTRY_SIZE = 0x20
};
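
/*
 * Note: with MTHCA_EQ_ENTRY_SIZE = 0x20 (32 bytes), 0x80 (128) EQEs
 * come to exactly 4 KB, i.e. one page on most platforms.
 */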

/*
 * Must be packed because start is 64 bits but only aligned to 32 bits.
 */
struct mthca_eq_context {
        __be32 flags;
        __be64 start;
        __be32 logsize_usrpage;
        __be32 tavor_pd;        /* reserved for Arbel */
        u8     reserved1[3];
        u8     intr;
        __be32 arbel_pd;        /* lost_count for Tavor */
        __be32 lkey;
        u32    reserved2[2];
        __be32 consumer_index;
        __be32 producer_index;
        u32    reserved3[4];
} __attribute__((packed));
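
/*
 * As programmed in mthca_create_eq() below, the top byte of
 * logsize_usrpage holds log2 of the number of EQ entries; on Tavor
 * the low bits additionally hold the driver's UAR index.
 */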

#define MTHCA_EQ_STATUS_OK          ( 0 << 28)
#define MTHCA_EQ_STATUS_OVERFLOW    ( 9 << 28)
#define MTHCA_EQ_STATUS_WRITE_FAIL  (10 << 28)
#define MTHCA_EQ_OWNER_SW           ( 0 << 24)
#define MTHCA_EQ_OWNER_HW           ( 1 << 24)
#define MTHCA_EQ_FLAG_TR            ( 1 << 18)
#define MTHCA_EQ_FLAG_OI            ( 1 << 17)
#define MTHCA_EQ_STATE_ARMED        ( 1 <<  8)
#define MTHCA_EQ_STATE_FIRED        ( 2 <<  8)
#define MTHCA_EQ_STATE_ALWAYS_ARMED ( 3 <<  8)
#define MTHCA_EQ_STATE_ARBEL        ( 8 <<  8)

enum {
        MTHCA_EVENT_TYPE_COMP            = 0x00,
        MTHCA_EVENT_TYPE_PATH_MIG        = 0x01,
        MTHCA_EVENT_TYPE_COMM_EST        = 0x02,
        MTHCA_EVENT_TYPE_SQ_DRAINED      = 0x03,
        MTHCA_EVENT_TYPE_SRQ_QP_LAST_WQE = 0x13,
        MTHCA_EVENT_TYPE_SRQ_LIMIT       = 0x14,
        MTHCA_EVENT_TYPE_CQ_ERROR        = 0x04,
        MTHCA_EVENT_TYPE_WQ_CATAS_ERROR  = 0x05,
        MTHCA_EVENT_TYPE_EEC_CATAS_ERROR = 0x06,
        MTHCA_EVENT_TYPE_PATH_MIG_FAILED = 0x07,
        MTHCA_EVENT_TYPE_WQ_INVAL_REQ_ERROR = 0x10,
        MTHCA_EVENT_TYPE_WQ_ACCESS_ERROR = 0x11,
        MTHCA_EVENT_TYPE_SRQ_CATAS_ERROR = 0x12,
        MTHCA_EVENT_TYPE_LOCAL_CATAS_ERROR = 0x08,
        MTHCA_EVENT_TYPE_PORT_CHANGE     = 0x09,
        MTHCA_EVENT_TYPE_EQ_OVERFLOW     = 0x0f,
        MTHCA_EVENT_TYPE_ECC_DETECT      = 0x0e,
        MTHCA_EVENT_TYPE_CMD             = 0x0a
};

#define MTHCA_ASYNC_EVENT_MASK ((1ULL << MTHCA_EVENT_TYPE_PATH_MIG)           | \
                                (1ULL << MTHCA_EVENT_TYPE_COMM_EST)           | \
                                (1ULL << MTHCA_EVENT_TYPE_SQ_DRAINED)         | \
                                (1ULL << MTHCA_EVENT_TYPE_CQ_ERROR)           | \
                                (1ULL << MTHCA_EVENT_TYPE_WQ_CATAS_ERROR)     | \
                                (1ULL << MTHCA_EVENT_TYPE_EEC_CATAS_ERROR)    | \
                                (1ULL << MTHCA_EVENT_TYPE_PATH_MIG_FAILED)    | \
                                (1ULL << MTHCA_EVENT_TYPE_WQ_INVAL_REQ_ERROR) | \
                                (1ULL << MTHCA_EVENT_TYPE_WQ_ACCESS_ERROR)    | \
                                (1ULL << MTHCA_EVENT_TYPE_LOCAL_CATAS_ERROR)  | \
                                (1ULL << MTHCA_EVENT_TYPE_PORT_CHANGE)        | \
                                (1ULL << MTHCA_EVENT_TYPE_ECC_DETECT))
#define MTHCA_SRQ_EVENT_MASK   ((1ULL << MTHCA_EVENT_TYPE_SRQ_CATAS_ERROR)    | \
                                (1ULL << MTHCA_EVENT_TYPE_SRQ_QP_LAST_WQE)    | \
                                (1ULL << MTHCA_EVENT_TYPE_SRQ_LIMIT))
#define MTHCA_CMD_EVENT_MASK    (1ULL << MTHCA_EVENT_TYPE_CMD)

#define MTHCA_EQ_DB_INC_CI     (1 << 24)
#define MTHCA_EQ_DB_REQ_NOT    (2 << 24)
#define MTHCA_EQ_DB_DISARM_CQ  (3 << 24)
#define MTHCA_EQ_DB_SET_CI     (4 << 24)
#define MTHCA_EQ_DB_ALWAYS_ARM (5 << 24)
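
/*
 * In the Tavor doorbell writes below, the first word carries one of
 * these commands in its top byte plus the EQ number in the low bits,
 * and the second word carries the command's argument (a consumer
 * index or a CQ number).
 */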

struct mthca_eqe {
        u8 reserved1;
        u8 type;
        u8 reserved2;
        u8 subtype;
        union {
                u32 raw[6];
                struct {
                        __be32 cqn;
                } __attribute__((packed)) comp;
                struct {
                        u16    reserved1;
                        __be16 token;
                        u32    reserved2;
                        u8     reserved3[3];
                        u8     status;
                        __be64 out_param;
                } __attribute__((packed)) cmd;
                struct {
                        __be32 qpn;
                } __attribute__((packed)) qp;
                struct {
                        __be32 srqn;
                } __attribute__((packed)) srq;
                struct {
                        __be32 cqn;
                        u32    reserved1;
                        u8     reserved2[3];
                        u8     syndrome;
                } __attribute__((packed)) cq_err;
                struct {
                        u32    reserved1[2];
                        __be32 port;
                } __attribute__((packed)) port_change;
        } event;
        u8 reserved3[3];
        u8 owner;
} __attribute__((packed));

#define MTHCA_EQ_ENTRY_OWNER_SW      (0 << 7)
#define MTHCA_EQ_ENTRY_OWNER_HW      (1 << 7)
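
/*
 * The top bit of the EQE owner byte implements the SW/HW handshake:
 * the HCA clears it when it writes an event, next_eqe_sw() only
 * consumes an entry once the bit is clear, and set_eqe_hw() hands the
 * slot back to the HCA after the event has been processed.
 */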

static inline u64 async_mask(struct mthca_dev *dev)
{
        return dev->mthca_flags & MTHCA_FLAG_SRQ ?
                MTHCA_ASYNC_EVENT_MASK | MTHCA_SRQ_EVENT_MASK :
                MTHCA_ASYNC_EVENT_MASK;
}

static inline void tavor_set_eq_ci(struct mthca_dev *dev, struct mthca_eq *eq, u32 ci)
{
        /*
         * This barrier makes sure that all updates to ownership bits
         * done by set_eqe_hw() hit memory before the consumer index
         * is updated.  set_eq_ci() allows the HCA to possibly write
         * more EQ entries, and we want to avoid the exceedingly
         * unlikely possibility of the HCA writing an entry and then
         * having set_eqe_hw() overwrite the owner field.
         */
        wmb();
        mthca_write64(MTHCA_EQ_DB_SET_CI | eq->eqn, ci & (eq->nent - 1),
                      dev->kar + MTHCA_EQ_DOORBELL,
                      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
}

static inline void arbel_set_eq_ci(struct mthca_dev *dev, struct mthca_eq *eq, u32 ci)
{
        /* See comment in tavor_set_eq_ci() above. */
        wmb();
        __raw_writel((__force u32) cpu_to_be32(ci),
                     dev->eq_regs.arbel.eq_set_ci_base + eq->eqn * 8);
        /* We still want ordering, just not swabbing, so add a barrier */
        mb();
}

static inline void set_eq_ci(struct mthca_dev *dev, struct mthca_eq *eq, u32 ci)
{
        if (mthca_is_memfree(dev))
                arbel_set_eq_ci(dev, eq, ci);
        else
                tavor_set_eq_ci(dev, eq, ci);
}

static inline void tavor_eq_req_not(struct mthca_dev *dev, int eqn)
{
        mthca_write64(MTHCA_EQ_DB_REQ_NOT | eqn, 0,
                      dev->kar + MTHCA_EQ_DOORBELL,
                      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
}

static inline void arbel_eq_req_not(struct mthca_dev *dev, u32 eqn_mask)
{
        writel(eqn_mask, dev->eq_regs.arbel.eq_arm);
}

static inline void disarm_cq(struct mthca_dev *dev, int eqn, int cqn)
{
        if (!mthca_is_memfree(dev)) {
                mthca_write64(MTHCA_EQ_DB_DISARM_CQ | eqn, cqn,
                              dev->kar + MTHCA_EQ_DOORBELL,
                              MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
        }
}
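
/*
 * eq->nent is always a power of two (see mthca_create_eq()), so
 * "entry & (eq->nent - 1)" below wraps the consumer index around the
 * ring cheaply, and the resulting byte offset is split across the
 * page_list[] pages backing the queue.
 */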
static inline struct mthca_eqe *get_eqe(struct mthca_eq *eq, u32 entry)
{
        unsigned long off = (entry & (eq->nent - 1)) * MTHCA_EQ_ENTRY_SIZE;
        return eq->page_list[off / PAGE_SIZE].buf + off % PAGE_SIZE;
}

static inline struct mthca_eqe *next_eqe_sw(struct mthca_eq *eq)
{
        struct mthca_eqe *eqe;
        eqe = get_eqe(eq, eq->cons_index);
        return (MTHCA_EQ_ENTRY_OWNER_HW & eqe->owner) ? NULL : eqe;
}

static inline void set_eqe_hw(struct mthca_eqe *eqe)
{
        eqe->owner = MTHCA_EQ_ENTRY_OWNER_HW;
}

static void port_change(struct mthca_dev *dev, int port, int active)
{
        struct ib_event record;

        mthca_dbg(dev, "Port change to %s for port %d\n",
                  active ? "active" : "down", port);

        record.device = &dev->ib_dev;
        record.event  = active ? IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR;
        record.element.port_num = port;

        ib_dispatch_event(&record);
}

static int mthca_eq_int(struct mthca_dev *dev, struct mthca_eq *eq)
{
        struct mthca_eqe *eqe;
        int disarm_cqn;
        int eqes_found = 0;
        int set_ci = 0;

        while ((eqe = next_eqe_sw(eq))) {
                /*
                 * Make sure we read EQ entry contents after we've
                 * checked the ownership bit.
                 */
                rmb();

                switch (eqe->type) {
                case MTHCA_EVENT_TYPE_COMP:
                        disarm_cqn = be32_to_cpu(eqe->event.comp.cqn) & 0xffffff;
                        disarm_cq(dev, eq->eqn, disarm_cqn);
                        mthca_cq_completion(dev, disarm_cqn);
                        break;

                case MTHCA_EVENT_TYPE_PATH_MIG:
                        mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
                                       IB_EVENT_PATH_MIG);
                        break;

                case MTHCA_EVENT_TYPE_COMM_EST:
                        mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
                                       IB_EVENT_COMM_EST);
                        break;

                case MTHCA_EVENT_TYPE_SQ_DRAINED:
                        mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
                                       IB_EVENT_SQ_DRAINED);
                        break;

                case MTHCA_EVENT_TYPE_SRQ_QP_LAST_WQE:
                        mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
                                       IB_EVENT_QP_LAST_WQE_REACHED);
                        break;

                case MTHCA_EVENT_TYPE_SRQ_LIMIT:
                        mthca_srq_event(dev, be32_to_cpu(eqe->event.srq.srqn) & 0xffffff,
                                        IB_EVENT_SRQ_LIMIT_REACHED);
                        break;

                case MTHCA_EVENT_TYPE_WQ_CATAS_ERROR:
                        mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
                                       IB_EVENT_QP_FATAL);
                        break;

                case MTHCA_EVENT_TYPE_PATH_MIG_FAILED:
                        mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
                                       IB_EVENT_PATH_MIG_ERR);
                        break;

                case MTHCA_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
                        mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
                                       IB_EVENT_QP_REQ_ERR);
                        break;

                case MTHCA_EVENT_TYPE_WQ_ACCESS_ERROR:
                        mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
                                       IB_EVENT_QP_ACCESS_ERR);
                        break;

                case MTHCA_EVENT_TYPE_CMD:
                        mthca_cmd_event(dev,
                                        be16_to_cpu(eqe->event.cmd.token),
                                        eqe->event.cmd.status,
                                        be64_to_cpu(eqe->event.cmd.out_param));
                        break;

                case MTHCA_EVENT_TYPE_PORT_CHANGE:
                        port_change(dev,
                                    (be32_to_cpu(eqe->event.port_change.port) >> 28) & 3,
                                    eqe->subtype == 0x4);
                        break;

                case MTHCA_EVENT_TYPE_CQ_ERROR:
                        mthca_warn(dev, "CQ %s on CQN %06x\n",
                                   eqe->event.cq_err.syndrome == 1 ?
                                   "overrun" : "access violation",
                                   be32_to_cpu(eqe->event.cq_err.cqn) & 0xffffff);
                        mthca_cq_event(dev, be32_to_cpu(eqe->event.cq_err.cqn),
                                       IB_EVENT_CQ_ERR);
                        break;

                case MTHCA_EVENT_TYPE_EQ_OVERFLOW:
                        mthca_warn(dev, "EQ overrun on EQN %d\n", eq->eqn);
                        break;

                case MTHCA_EVENT_TYPE_EEC_CATAS_ERROR:
                case MTHCA_EVENT_TYPE_SRQ_CATAS_ERROR:
                case MTHCA_EVENT_TYPE_LOCAL_CATAS_ERROR:
                case MTHCA_EVENT_TYPE_ECC_DETECT:
                default:
                        mthca_warn(dev, "Unhandled event %02x(%02x) on EQ %d\n",
                                   eqe->type, eqe->subtype, eq->eqn);
                        break;
                }

                set_eqe_hw(eqe);
                ++eq->cons_index;
                eqes_found = 1;
                ++set_ci;

                /*
                 * The HCA will think the queue has overflowed if we
                 * don't tell it we've been processing events.  We
                 * create our EQs with MTHCA_NUM_SPARE_EQE extra
                 * entries, so we must update our consumer index at
                 * least that often.
                 */
                if (unlikely(set_ci >= MTHCA_NUM_SPARE_EQE)) {
                        /*
                         * Conditional on hca_type is OK here because
                         * this is a rare case, not the fast path.
                         */
                        set_eq_ci(dev, eq, eq->cons_index);
                        set_ci = 0;
                }
        }

        /*
         * Rely on caller to set consumer index so that we don't have
         * to test hca_type in our interrupt handling fast path.
         */
        return eqes_found;
}
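
/*
 * Tavor-mode hardware posts a bit per firing EQ in the 64-bit ECR;
 * the handler below reads only its low word (at offset 4, covering
 * EQs 0-31, matching the register-mapping comment further down),
 * acknowledges those bits via the ECR clear register, and then
 * services each EQ whose bit is set.
 */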
static irqreturn_t mthca_tavor_interrupt(int irq, void *dev_ptr)
{
        struct mthca_dev *dev = dev_ptr;
        u32 ecr;
        int i;

        if (dev->eq_table.clr_mask)
                writel(dev->eq_table.clr_mask, dev->eq_table.clr_int);

        ecr = readl(dev->eq_regs.tavor.ecr_base + 4);
        if (!ecr)
                return IRQ_NONE;

        writel(ecr, dev->eq_regs.tavor.ecr_base +
               MTHCA_ECR_CLR_BASE - MTHCA_ECR_BASE + 4);

        for (i = 0; i < MTHCA_NUM_EQ; ++i)
                if (ecr & dev->eq_table.eq[i].eqn_mask) {
                        if (mthca_eq_int(dev, &dev->eq_table.eq[i]))
                                tavor_set_eq_ci(dev, &dev->eq_table.eq[i],
                                                dev->eq_table.eq[i].cons_index);
                        tavor_eq_req_not(dev, dev->eq_table.eq[i].eqn);
                }

        return IRQ_HANDLED;
}

static irqreturn_t mthca_tavor_msi_x_interrupt(int irq, void *eq_ptr)
{
        struct mthca_eq *eq = eq_ptr;
        struct mthca_dev *dev = eq->dev;

        mthca_eq_int(dev, eq);
        tavor_set_eq_ci(dev, eq, eq->cons_index);
        tavor_eq_req_not(dev, eq->eqn);

        /* MSI-X vectors always belong to us */
        return IRQ_HANDLED;
}
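
/*
 * Arbel-mode (MemFree) hardware has no ECR read here to report which
 * EQs fired, so the INTx handler below simply polls all of them and
 * re-arms the whole set with a single write of arm_mask.
 */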
static irqreturn_t mthca_arbel_interrupt(int irq, void *dev_ptr)
{
        struct mthca_dev *dev = dev_ptr;
        int work = 0;
        int i;

        if (dev->eq_table.clr_mask)
                writel(dev->eq_table.clr_mask, dev->eq_table.clr_int);

        for (i = 0; i < MTHCA_NUM_EQ; ++i)
                if (mthca_eq_int(dev, &dev->eq_table.eq[i])) {
                        work = 1;
                        arbel_set_eq_ci(dev, &dev->eq_table.eq[i],
                                        dev->eq_table.eq[i].cons_index);
                }

        arbel_eq_req_not(dev, dev->eq_table.arm_mask);

        return IRQ_RETVAL(work);
}

static irqreturn_t mthca_arbel_msi_x_interrupt(int irq, void *eq_ptr)
{
        struct mthca_eq *eq = eq_ptr;
        struct mthca_dev *dev = eq->dev;

        mthca_eq_int(dev, eq);
        arbel_set_eq_ci(dev, eq, eq->cons_index);
        arbel_eq_req_not(dev, eq->eqn_mask);

        /* MSI-X vectors always belong to us */
        return IRQ_HANDLED;
}
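
/*
 * Worked example of the sizing below, assuming 4 KB pages: the async
 * EQ asks for MTHCA_NUM_ASYNC_EQE + MTHCA_NUM_SPARE_EQE = 0x100 (256)
 * entries, already a power of two, so eq->nent = 256 and
 * 256 * 32 bytes = 8 KB, i.e. npages = 2.
 */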
static int mthca_create_eq(struct mthca_dev *dev,
                           int nent,
                           u8 intr,
                           struct mthca_eq *eq)
{
        int npages;
        u64 *dma_list = NULL;
        dma_addr_t t;
        struct mthca_mailbox *mailbox;
        struct mthca_eq_context *eq_context;
        int err = -ENOMEM;
        int i;
        u8 status;

        eq->dev  = dev;
        eq->nent = roundup_pow_of_two(max(nent, 2));
        npages = ALIGN(eq->nent * MTHCA_EQ_ENTRY_SIZE, PAGE_SIZE) / PAGE_SIZE;

        eq->page_list = kmalloc(npages * sizeof *eq->page_list,
                                GFP_KERNEL);
        if (!eq->page_list)
                goto err_out;

        for (i = 0; i < npages; ++i)
                eq->page_list[i].buf = NULL;

        dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL);
        if (!dma_list)
                goto err_out_free;

        mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
        if (IS_ERR(mailbox))
                goto err_out_free;
        eq_context = mailbox->buf;

        for (i = 0; i < npages; ++i) {
                eq->page_list[i].buf = dma_alloc_coherent(&dev->pdev->dev,
                                                          PAGE_SIZE, &t, GFP_KERNEL);
                if (!eq->page_list[i].buf)
                        goto err_out_free_pages;

                dma_list[i] = t;
                dma_unmap_addr_set(&eq->page_list[i], mapping, t);

                clear_page(eq->page_list[i].buf);
        }

        for (i = 0; i < eq->nent; ++i)
                set_eqe_hw(get_eqe(eq, i));

        eq->eqn = mthca_alloc(&dev->eq_table.alloc);
        if (eq->eqn == -1)
                goto err_out_free_pages;

        err = mthca_mr_alloc_phys(dev, dev->driver_pd.pd_num,
                                  dma_list, PAGE_SHIFT, npages,
                                  0, npages * PAGE_SIZE,
                                  MTHCA_MPT_FLAG_LOCAL_WRITE |
                                  MTHCA_MPT_FLAG_LOCAL_READ,
                                  &eq->mr);
        if (err)
                goto err_out_free_eq;

        memset(eq_context, 0, sizeof *eq_context);
        eq_context->flags           = cpu_to_be32(MTHCA_EQ_STATUS_OK   |
                                                  MTHCA_EQ_OWNER_HW    |
                                                  MTHCA_EQ_STATE_ARMED |
                                                  MTHCA_EQ_FLAG_TR);
        if (mthca_is_memfree(dev))
                eq_context->flags  |= cpu_to_be32(MTHCA_EQ_STATE_ARBEL);

        eq_context->logsize_usrpage = cpu_to_be32((ffs(eq->nent) - 1) << 24);
        if (mthca_is_memfree(dev)) {
                eq_context->arbel_pd = cpu_to_be32(dev->driver_pd.pd_num);
        } else {
                eq_context->logsize_usrpage |= cpu_to_be32(dev->driver_uar.index);
                eq_context->tavor_pd         = cpu_to_be32(dev->driver_pd.pd_num);
        }
        eq_context->intr            = intr;
        eq_context->lkey            = cpu_to_be32(eq->mr.ibmr.lkey);

        err = mthca_SW2HW_EQ(dev, mailbox, eq->eqn, &status);
        if (err) {
                mthca_warn(dev, "SW2HW_EQ failed (%d)\n", err);
                goto err_out_free_mr;
        }
        if (status) {
                mthca_warn(dev, "SW2HW_EQ returned status 0x%02x\n",
                           status);
                err = -EINVAL;
                goto err_out_free_mr;
        }

        kfree(dma_list);
        mthca_free_mailbox(dev, mailbox);
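
        /*
         * The EQN mask is stored pre-swapped (swab32), presumably so
         * that it can be written to and compared against the
         * big-endian ECR/arm registers without further byte swapping
         * on the fast path.
         */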
        eq->eqn_mask   = swab32(1 << eq->eqn);
        eq->cons_index = 0;

        dev->eq_table.arm_mask |= eq->eqn_mask;

        mthca_dbg(dev, "Allocated EQ %d with %d entries\n",
                  eq->eqn, eq->nent);

        return err;

err_out_free_mr:
        mthca_free_mr(dev, &eq->mr);

err_out_free_eq:
        mthca_free(&dev->eq_table.alloc, eq->eqn);

err_out_free_pages:
        for (i = 0; i < npages; ++i)
                if (eq->page_list[i].buf)
                        dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
                                          eq->page_list[i].buf,
                                          dma_unmap_addr(&eq->page_list[i],
                                                         mapping));

        mthca_free_mailbox(dev, mailbox);

err_out_free:
        kfree(eq->page_list);
        kfree(dma_list);

err_out:
        return err;
}

static void mthca_free_eq(struct mthca_dev *dev,
                          struct mthca_eq *eq)
{
        struct mthca_mailbox *mailbox;
        int err;
        u8 status;
        int npages = (eq->nent * MTHCA_EQ_ENTRY_SIZE + PAGE_SIZE - 1) /
                PAGE_SIZE;
        int i;

        mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
        if (IS_ERR(mailbox))
                return;

        err = mthca_HW2SW_EQ(dev, mailbox, eq->eqn, &status);
        if (err)
                mthca_warn(dev, "HW2SW_EQ failed (%d)\n", err);
        if (status)
                mthca_warn(dev, "HW2SW_EQ returned status 0x%02x\n", status);

        dev->eq_table.arm_mask &= ~eq->eqn_mask;
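
        /*
         * Compiled-out debug aid: flip to "if (1)" to hex-dump the EQ
         * context that HW2SW_EQ wrote back into the mailbox.
         */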
        if (0) {
                mthca_dbg(dev, "Dumping EQ context %02x:\n", eq->eqn);
                for (i = 0; i < sizeof (struct mthca_eq_context) / 4; ++i) {
                        if (i % 4 == 0)
                                printk("[%02x] ", i * 4);
                        printk(" %08x", be32_to_cpup(mailbox->buf + i * 4));
                        if ((i + 1) % 4 == 0)
                                printk("\n");
                }
        }

        mthca_free_mr(dev, &eq->mr);
        for (i = 0; i < npages; ++i)
                pci_free_consistent(dev->pdev, PAGE_SIZE,
                                    eq->page_list[i].buf,
                                    dma_unmap_addr(&eq->page_list[i], mapping));

        kfree(eq->page_list);
        mthca_free_mailbox(dev, mailbox);
}

static void mthca_free_irqs(struct mthca_dev *dev)
{
        int i;

        if (dev->eq_table.have_irq)
                free_irq(dev->pdev->irq, dev);
        for (i = 0; i < MTHCA_NUM_EQ; ++i)
                if (dev->eq_table.eq[i].have_irq) {
                        free_irq(dev->eq_table.eq[i].msi_x_vector,
                                 dev->eq_table.eq + i);
                        dev->eq_table.eq[i].have_irq = 0;
                }
}

static int mthca_map_reg(struct mthca_dev *dev,
                         unsigned long offset, unsigned long size,
                         void __iomem **map)
{
        unsigned long base = pci_resource_start(dev->pdev, 0);

        *map = ioremap(base + offset, size);
        if (!*map)
                return -ENOMEM;

        return 0;
}
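
/*
 * PCI BAR sizes are powers of two, so "(pci_resource_len(...) - 1) &
 * addr" in the calls below reduces a firmware-reported bus address to
 * its offset within BAR 0 before it is handed to mthca_map_reg().
 */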
static int mthca_map_eq_regs(struct mthca_dev *dev)
{
        if (mthca_is_memfree(dev)) {
                /*
                 * We assume that the EQ arm and EQ set CI registers
                 * fall within the first BAR.  We can't trust the
                 * values firmware gives us, since those addresses are
                 * valid on the HCA's side of the PCI bus but not
                 * necessarily the host side.
                 */
                if (mthca_map_reg(dev, (pci_resource_len(dev->pdev, 0) - 1) &
                                  dev->fw.arbel.clr_int_base, MTHCA_CLR_INT_SIZE,
                                  &dev->clr_base)) {
                        mthca_err(dev, "Couldn't map interrupt clear register, "
                                  "aborting.\n");
                        return -ENOMEM;
                }

                /*
                 * Add 4 because we limit ourselves to EQs 0 ... 31,
                 * so we only need the low word of the register.
                 */
                if (mthca_map_reg(dev, ((pci_resource_len(dev->pdev, 0) - 1) &
                                        dev->fw.arbel.eq_arm_base) + 4, 4,
                                  &dev->eq_regs.arbel.eq_arm)) {
                        mthca_err(dev, "Couldn't map EQ arm register, aborting.\n");
                        iounmap(dev->clr_base);
                        return -ENOMEM;
                }

                if (mthca_map_reg(dev, (pci_resource_len(dev->pdev, 0) - 1) &
                                  dev->fw.arbel.eq_set_ci_base,
                                  MTHCA_EQ_SET_CI_SIZE,
                                  &dev->eq_regs.arbel.eq_set_ci_base)) {
                        mthca_err(dev, "Couldn't map EQ CI register, aborting.\n");
                        iounmap(dev->eq_regs.arbel.eq_arm);
                        iounmap(dev->clr_base);
                        return -ENOMEM;
                }
        } else {
                if (mthca_map_reg(dev, MTHCA_CLR_INT_BASE, MTHCA_CLR_INT_SIZE,
                                  &dev->clr_base)) {
                        mthca_err(dev, "Couldn't map interrupt clear register, "
                                  "aborting.\n");
                        return -ENOMEM;
                }

                if (mthca_map_reg(dev, MTHCA_ECR_BASE,
                                  MTHCA_ECR_SIZE + MTHCA_ECR_CLR_SIZE,
                                  &dev->eq_regs.tavor.ecr_base)) {
                        mthca_err(dev, "Couldn't map ecr register, "
                                  "aborting.\n");
                        iounmap(dev->clr_base);
                        return -ENOMEM;
                }
        }

        return 0;
}

static void mthca_unmap_eq_regs(struct mthca_dev *dev)
{
        if (mthca_is_memfree(dev)) {
                iounmap(dev->eq_regs.arbel.eq_set_ci_base);
                iounmap(dev->eq_regs.arbel.eq_arm);
                iounmap(dev->clr_base);
        } else {
                iounmap(dev->eq_regs.tavor.ecr_base);
                iounmap(dev->clr_base);
        }
}

int mthca_map_eq_icm(struct mthca_dev *dev, u64 icm_virt)
{
        int ret;
        u8 status;

        /*
         * We assume that mapping one page is enough for the whole EQ
         * context table.  This is fine with all current HCAs, because
         * we only use 32 EQs and each EQ uses 32 bytes of context
         * memory, or 1 KB total.
         */
        dev->eq_table.icm_virt = icm_virt;
        dev->eq_table.icm_page = alloc_page(GFP_HIGHUSER);
        if (!dev->eq_table.icm_page)
                return -ENOMEM;
        dev->eq_table.icm_dma  = pci_map_page(dev->pdev, dev->eq_table.icm_page, 0,
                                              PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
        if (pci_dma_mapping_error(dev->pdev, dev->eq_table.icm_dma)) {
                __free_page(dev->eq_table.icm_page);
                return -ENOMEM;
        }

        ret = mthca_MAP_ICM_page(dev, dev->eq_table.icm_dma, icm_virt, &status);
        if (!ret && status)
                ret = -EINVAL;
        if (ret) {
                pci_unmap_page(dev->pdev, dev->eq_table.icm_dma, PAGE_SIZE,
                               PCI_DMA_BIDIRECTIONAL);
                __free_page(dev->eq_table.icm_page);
        }

        return ret;
}

void mthca_unmap_eq_icm(struct mthca_dev *dev)
{
        u8 status;

        mthca_UNMAP_ICM(dev, dev->eq_table.icm_virt, 1, &status);
        pci_unmap_page(dev->pdev, dev->eq_table.icm_dma, PAGE_SIZE,
                       PCI_DMA_BIDIRECTIONAL);
        __free_page(dev->eq_table.icm_page);
}
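
/*
 * Bring-up order below: reserve EQ numbers, map the doorbell/ECR
 * registers, create the completion, async and command EQs, hook up
 * either one MSI-X vector per EQ or a single shared INTx line, tell
 * the HCA which event types go to which EQ, and finally arm
 * everything.
 */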
int mthca_init_eq_table(struct mthca_dev *dev)
{
        int err;
        u8 status;
        u8 intr;
        int i;

        err = mthca_alloc_init(&dev->eq_table.alloc,
                               dev->limits.num_eqs,
                               dev->limits.num_eqs - 1,
                               dev->limits.reserved_eqs);
        if (err)
                return err;

        err = mthca_map_eq_regs(dev);
        if (err)
                goto err_out_free;
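
        /*
         * For plain INTx, clr_mask/clr_int select the bit of our INTA
         * pin inside the 64-bit interrupt clear register; the mask is
         * pre-swapped with swab32() and pins 0-31 live in the word at
         * offset 4, presumably because the register is big-endian.
         */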
        if (dev->mthca_flags & MTHCA_FLAG_MSI_X) {
                dev->eq_table.clr_mask = 0;
        } else {
                dev->eq_table.clr_mask =
                        swab32(1 << (dev->eq_table.inta_pin & 31));
                dev->eq_table.clr_int  = dev->clr_base +
                        (dev->eq_table.inta_pin < 32 ? 4 : 0);
        }

        dev->eq_table.arm_mask = 0;

        intr = dev->eq_table.inta_pin;
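
        /*
         * Each EQ's interrupt field gets a distinct value (128, 129,
         * 130) when MSI-X is in use, one per vector; otherwise all
         * three EQs share the INTA pin.
         */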
        err = mthca_create_eq(dev, dev->limits.num_cqs + MTHCA_NUM_SPARE_EQE,
                              (dev->mthca_flags & MTHCA_FLAG_MSI_X) ? 128 : intr,
                              &dev->eq_table.eq[MTHCA_EQ_COMP]);
        if (err)
                goto err_out_unmap;

        err = mthca_create_eq(dev, MTHCA_NUM_ASYNC_EQE + MTHCA_NUM_SPARE_EQE,
                              (dev->mthca_flags & MTHCA_FLAG_MSI_X) ? 129 : intr,
                              &dev->eq_table.eq[MTHCA_EQ_ASYNC]);
        if (err)
                goto err_out_comp;

        err = mthca_create_eq(dev, MTHCA_NUM_CMD_EQE + MTHCA_NUM_SPARE_EQE,
                              (dev->mthca_flags & MTHCA_FLAG_MSI_X) ? 130 : intr,
                              &dev->eq_table.eq[MTHCA_EQ_CMD]);
        if (err)
                goto err_out_async;

        if (dev->mthca_flags & MTHCA_FLAG_MSI_X) {
                static const char *eq_name[] = {
                        [MTHCA_EQ_COMP]  = DRV_NAME "-comp",
                        [MTHCA_EQ_ASYNC] = DRV_NAME "-async",
                        [MTHCA_EQ_CMD]   = DRV_NAME "-cmd"
                };

                for (i = 0; i < MTHCA_NUM_EQ; ++i) {
                        snprintf(dev->eq_table.eq[i].irq_name,
                                 IB_DEVICE_NAME_MAX,
                                 "%s@pci:%s", eq_name[i],
                                 pci_name(dev->pdev));
                        err = request_irq(dev->eq_table.eq[i].msi_x_vector,
                                          mthca_is_memfree(dev) ?
                                          mthca_arbel_msi_x_interrupt :
                                          mthca_tavor_msi_x_interrupt,
                                          0, dev->eq_table.eq[i].irq_name,
                                          dev->eq_table.eq + i);
                        if (err)
                                goto err_out_cmd;
                        dev->eq_table.eq[i].have_irq = 1;
                }
        } else {
                snprintf(dev->eq_table.eq[0].irq_name, IB_DEVICE_NAME_MAX,
                         DRV_NAME "@pci:%s", pci_name(dev->pdev));
                err = request_irq(dev->pdev->irq,
                                  mthca_is_memfree(dev) ?
                                  mthca_arbel_interrupt :
                                  mthca_tavor_interrupt,
                                  IRQF_SHARED, dev->eq_table.eq[0].irq_name, dev);
                if (err)
                        goto err_out_cmd;
                dev->eq_table.have_irq = 1;
        }
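
        /*
         * MAP_EQ binds the async and command event-type masks to
         * their EQs.  No mask is mapped for the completion EQ:
         * MTHCA_EVENT_TYPE_COMP is absent from the masks above and is
         * presumably routed per-CQ when each CQ is created.
         */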
        err = mthca_MAP_EQ(dev, async_mask(dev),
                           0, dev->eq_table.eq[MTHCA_EQ_ASYNC].eqn, &status);
        if (err)
                mthca_warn(dev, "MAP_EQ for async EQ %d failed (%d)\n",
                           dev->eq_table.eq[MTHCA_EQ_ASYNC].eqn, err);
        if (status)
                mthca_warn(dev, "MAP_EQ for async EQ %d returned status 0x%02x\n",
                           dev->eq_table.eq[MTHCA_EQ_ASYNC].eqn, status);

        err = mthca_MAP_EQ(dev, MTHCA_CMD_EVENT_MASK,
                           0, dev->eq_table.eq[MTHCA_EQ_CMD].eqn, &status);
        if (err)
                mthca_warn(dev, "MAP_EQ for cmd EQ %d failed (%d)\n",
                           dev->eq_table.eq[MTHCA_EQ_CMD].eqn, err);
        if (status)
                mthca_warn(dev, "MAP_EQ for cmd EQ %d returned status 0x%02x\n",
                           dev->eq_table.eq[MTHCA_EQ_CMD].eqn, status);

        for (i = 0; i < MTHCA_NUM_EQ; ++i)
                if (mthca_is_memfree(dev))
                        arbel_eq_req_not(dev, dev->eq_table.eq[i].eqn_mask);
                else
                        tavor_eq_req_not(dev, dev->eq_table.eq[i].eqn);

        return 0;

err_out_cmd:
        mthca_free_irqs(dev);
        mthca_free_eq(dev, &dev->eq_table.eq[MTHCA_EQ_CMD]);

err_out_async:
        mthca_free_eq(dev, &dev->eq_table.eq[MTHCA_EQ_ASYNC]);

err_out_comp:
        mthca_free_eq(dev, &dev->eq_table.eq[MTHCA_EQ_COMP]);

err_out_unmap:
        mthca_unmap_eq_regs(dev);

err_out_free:
        mthca_alloc_cleanup(&dev->eq_table.alloc);
        return err;
}

void mthca_cleanup_eq_table(struct mthca_dev *dev)
{
        u8 status;
        int i;

        mthca_free_irqs(dev);

        mthca_MAP_EQ(dev, async_mask(dev),
                     1, dev->eq_table.eq[MTHCA_EQ_ASYNC].eqn, &status);
        mthca_MAP_EQ(dev, MTHCA_CMD_EVENT_MASK,
                     1, dev->eq_table.eq[MTHCA_EQ_CMD].eqn, &status);

        for (i = 0; i < MTHCA_NUM_EQ; ++i)
                mthca_free_eq(dev, &dev->eq_table.eq[i]);

        mthca_unmap_eq_regs(dev);

        mthca_alloc_cleanup(&dev->eq_table.alloc);
}