[PATCH] IB uverbs: update mthca for new API
[linux-2.6.22.y-op.git] drivers/infiniband/hw/mthca/mthca_provider.c
/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: mthca_provider.c 1397 2004-12-28 05:09:00Z roland $
 */

#include <ib_smi.h>

#include "mthca_dev.h"
#include "mthca_cmd.h"
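/*
 * Most of the query verbs below go through the MAD_IFC firmware
 * command: a subnet management packet (SMP) is built in a kmalloc'ed
 * buffer, handed to the firmware, and the attribute is decoded from
 * the response data at the offsets the IB spec defines for that
 * attribute.
 */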
static int mthca_query_device(struct ib_device *ibdev,
			      struct ib_device_attr *props)
{
	struct ib_smp *in_mad  = NULL;
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;
	struct mthca_dev *mdev = to_mdev(ibdev);
	u8 status;

	in_mad  = kmalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	memset(props, 0, sizeof *props);

	props->fw_ver = mdev->fw_ver;

	memset(in_mad, 0, sizeof *in_mad);
	in_mad->base_version  = 1;
	in_mad->mgmt_class    = IB_MGMT_CLASS_SUBN_LID_ROUTED;
	in_mad->class_version = 1;
	in_mad->method        = IB_MGMT_METHOD_GET;
	in_mad->attr_id       = IB_SMP_ATTR_NODE_INFO;

	err = mthca_MAD_IFC(mdev, 1, 1,
			    1, NULL, NULL, in_mad, out_mad,
			    &status);
	if (err)
		goto out;
	if (status) {
		err = -EINVAL;
		goto out;
	}

	props->device_cap_flags = mdev->device_cap_flags;
	props->vendor_id        = be32_to_cpup((u32 *) (out_mad->data + 36)) &
		0xffffff;
	props->vendor_part_id   = be16_to_cpup((u16 *) (out_mad->data + 30));
	props->hw_ver           = be16_to_cpup((u16 *) (out_mad->data + 32));
	memcpy(&props->sys_image_guid, out_mad->data +  4, 8);
	memcpy(&props->node_guid,      out_mad->data + 12, 8);

	props->max_mr_size         = ~0ull;
	props->max_qp              = mdev->limits.num_qps - mdev->limits.reserved_qps;
	props->max_qp_wr           = 0xffff;
	props->max_sge             = mdev->limits.max_sg;
	props->max_cq              = mdev->limits.num_cqs - mdev->limits.reserved_cqs;
	props->max_cqe             = 0xffff;
	props->max_mr              = mdev->limits.num_mpts - mdev->limits.reserved_mrws;
	props->max_pd              = mdev->limits.num_pds - mdev->limits.reserved_pds;
	props->max_qp_rd_atom      = 1 << mdev->qp_table.rdb_shift;
	props->max_qp_init_rd_atom = 1 << mdev->qp_table.rdb_shift;
	props->local_ca_ack_delay  = mdev->limits.local_ca_ack_delay;

	err = 0;
 out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}
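/*
 * Query port attributes via a PortInfo MAD; attr_mod selects which
 * port the subnet management agent should report on.  Table lengths
 * come from the device limits rather than from the MAD response.
 */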
static int mthca_query_port(struct ib_device *ibdev,
			    u8 port, struct ib_port_attr *props)
{
	struct ib_smp *in_mad  = NULL;
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;
	u8 status;

	in_mad  = kmalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	memset(in_mad, 0, sizeof *in_mad);
	in_mad->base_version  = 1;
	in_mad->mgmt_class    = IB_MGMT_CLASS_SUBN_LID_ROUTED;
	in_mad->class_version = 1;
	in_mad->method        = IB_MGMT_METHOD_GET;
	in_mad->attr_id       = IB_SMP_ATTR_PORT_INFO;
	in_mad->attr_mod      = cpu_to_be32(port);

	err = mthca_MAD_IFC(to_mdev(ibdev), 1, 1,
			    port, NULL, NULL, in_mad, out_mad,
			    &status);
	if (err)
		goto out;
	if (status) {
		err = -EINVAL;
		goto out;
	}

	props->lid            = be16_to_cpup((u16 *) (out_mad->data + 16));
	props->lmc            = out_mad->data[34] & 0x7;
	props->sm_lid         = be16_to_cpup((u16 *) (out_mad->data + 18));
	props->sm_sl          = out_mad->data[36] & 0xf;
	props->state          = out_mad->data[32] & 0xf;
	props->phys_state     = out_mad->data[33] >> 4;
	props->port_cap_flags = be32_to_cpup((u32 *) (out_mad->data + 20));
	props->gid_tbl_len    = to_mdev(ibdev)->limits.gid_table_len;
	props->pkey_tbl_len   = to_mdev(ibdev)->limits.pkey_table_len;
	props->qkey_viol_cntr = be16_to_cpup((u16 *) (out_mad->data + 48));
	props->active_width   = out_mad->data[31] & 0xf;
	props->active_speed   = out_mad->data[35] >> 4;

 out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}
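/*
 * cap_mask_mutex serializes the read-modify-write of the port
 * capability mask: query the current flags, apply the caller's
 * set/clear masks, then push the result with a SET_IB command.
 */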
static int mthca_modify_port(struct ib_device *ibdev,
			     u8 port, int port_modify_mask,
			     struct ib_port_modify *props)
{
	struct mthca_set_ib_param set_ib;
	struct ib_port_attr attr;
	int err;
	u8 status;

	if (down_interruptible(&to_mdev(ibdev)->cap_mask_mutex))
		return -ERESTARTSYS;

	err = mthca_query_port(ibdev, port, &attr);
	if (err)
		goto out;

	set_ib.set_si_guid     = 0;
	set_ib.reset_qkey_viol = !!(port_modify_mask & IB_PORT_RESET_QKEY_CNTR);

	set_ib.cap_mask = (attr.port_cap_flags | props->set_port_cap_mask) &
		~props->clr_port_cap_mask;

	err = mthca_SET_IB(to_mdev(ibdev), &set_ib, port, &status);
	if (err)
		goto out;
	if (status) {
		err = -EINVAL;
		goto out;
	}

 out:
	up(&to_mdev(ibdev)->cap_mask_mutex);
	return err;
}
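/*
 * Each PKeyTable MAD carries a block of 32 P_Keys, so index / 32
 * picks the block (via attr_mod) and index % 32 the entry within
 * the returned data.  For example, index 40 fetches block 1 and
 * reads entry 8 of that block.
 */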
static int mthca_query_pkey(struct ib_device *ibdev,
			    u8 port, u16 index, u16 *pkey)
{
	struct ib_smp *in_mad  = NULL;
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;
	u8 status;

	in_mad  = kmalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	memset(in_mad, 0, sizeof *in_mad);
	in_mad->base_version  = 1;
	in_mad->mgmt_class    = IB_MGMT_CLASS_SUBN_LID_ROUTED;
	in_mad->class_version = 1;
	in_mad->method        = IB_MGMT_METHOD_GET;
	in_mad->attr_id       = IB_SMP_ATTR_PKEY_TABLE;
	in_mad->attr_mod      = cpu_to_be32(index / 32);

	err = mthca_MAD_IFC(to_mdev(ibdev), 1, 1,
			    port, NULL, NULL, in_mad, out_mad,
			    &status);
	if (err)
		goto out;
	if (status) {
		err = -EINVAL;
		goto out;
	}

	*pkey = be16_to_cpu(((u16 *) out_mad->data)[index % 32]);

 out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}
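/*
 * A GID is assembled from two MADs: bytes 8..15 of PortInfo give
 * the 8-byte subnet prefix, and GuidInfo (8 GUIDs per block, so
 * index / 8 selects the block) supplies the 8-byte GUID for the
 * lower half.
 */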
static int mthca_query_gid(struct ib_device *ibdev, u8 port,
			   int index, union ib_gid *gid)
{
	struct ib_smp *in_mad  = NULL;
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;
	u8 status;

	in_mad  = kmalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	memset(in_mad, 0, sizeof *in_mad);
	in_mad->base_version  = 1;
	in_mad->mgmt_class    = IB_MGMT_CLASS_SUBN_LID_ROUTED;
	in_mad->class_version = 1;
	in_mad->method        = IB_MGMT_METHOD_GET;
	in_mad->attr_id       = IB_SMP_ATTR_PORT_INFO;
	in_mad->attr_mod      = cpu_to_be32(port);

	err = mthca_MAD_IFC(to_mdev(ibdev), 1, 1,
			    port, NULL, NULL, in_mad, out_mad,
			    &status);
	if (err)
		goto out;
	if (status) {
		err = -EINVAL;
		goto out;
	}

	memcpy(gid->raw, out_mad->data + 8, 8);

	memset(in_mad, 0, sizeof *in_mad);
	in_mad->base_version  = 1;
	in_mad->mgmt_class    = IB_MGMT_CLASS_SUBN_LID_ROUTED;
	in_mad->class_version = 1;
	in_mad->method        = IB_MGMT_METHOD_GET;
	in_mad->attr_id       = IB_SMP_ATTR_GUID_INFO;
	in_mad->attr_mod      = cpu_to_be32(index / 8);

	err = mthca_MAD_IFC(to_mdev(ibdev), 1, 1,
			    port, NULL, NULL, in_mad, out_mad,
			    &status);
	if (err)
		goto out;
	if (status) {
		err = -EINVAL;
		goto out;
	}

	memcpy(gid->raw + 8, out_mad->data + (index % 8) * 8, 8);

 out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}
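/*
 * The context and udata parameters below come from the new uverbs
 * API; they are not used yet in this kernel-only version of the
 * driver.
 */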
static struct ib_pd *mthca_alloc_pd(struct ib_device *ibdev,
				    struct ib_ucontext *context,
				    struct ib_udata *udata)
{
	struct mthca_pd *pd;
	int err;

	pd = kmalloc(sizeof *pd, GFP_KERNEL);
	if (!pd)
		return ERR_PTR(-ENOMEM);

	err = mthca_pd_alloc(to_mdev(ibdev), pd);
	if (err) {
		kfree(pd);
		return ERR_PTR(err);
	}

	return &pd->ibpd;
}
static int mthca_dealloc_pd(struct ib_pd *pd)
{
	mthca_pd_free(to_mdev(pd->device), to_mpd(pd));
	kfree(pd);

	return 0;
}
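/*
 * Note the GFP_ATOMIC: address handle creation apparently must not
 * sleep, presumably because it can be called from contexts (such as
 * completion handlers) that run without a process context.
 */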
static struct ib_ah *mthca_ah_create(struct ib_pd *pd,
				     struct ib_ah_attr *ah_attr)
{
	int err;
	struct mthca_ah *ah;

	ah = kmalloc(sizeof *ah, GFP_ATOMIC);
	if (!ah)
		return ERR_PTR(-ENOMEM);

	err = mthca_create_ah(to_mdev(pd->device), to_mpd(pd), ah_attr, ah);
	if (err) {
		kfree(ah);
		return ERR_PTR(err);
	}

	return &ah->ibah;
}
static int mthca_ah_destroy(struct ib_ah *ah)
{
	mthca_destroy_ah(to_mdev(ah->device), to_mah(ah));
	kfree(ah);

	return 0;
}
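/*
 * Regular QPs (RC/UC/UD) use a plain mthca_qp; the special QP0/QP1
 * (SMI/GSI) types need the larger mthca_sqp and get their fixed QP
 * numbers (0 or 1) assigned before mthca_alloc_sqp is called.
 */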
static struct ib_qp *mthca_create_qp(struct ib_pd *pd,
				     struct ib_qp_init_attr *init_attr,
				     struct ib_udata *udata)
{
	struct mthca_qp *qp;
	int err;

	switch (init_attr->qp_type) {
	case IB_QPT_RC:
	case IB_QPT_UC:
	case IB_QPT_UD:
	{
		qp = kmalloc(sizeof *qp, GFP_KERNEL);
		if (!qp)
			return ERR_PTR(-ENOMEM);

		qp->sq.max    = init_attr->cap.max_send_wr;
		qp->rq.max    = init_attr->cap.max_recv_wr;
		qp->sq.max_gs = init_attr->cap.max_send_sge;
		qp->rq.max_gs = init_attr->cap.max_recv_sge;

		err = mthca_alloc_qp(to_mdev(pd->device), to_mpd(pd),
				     to_mcq(init_attr->send_cq),
				     to_mcq(init_attr->recv_cq),
				     init_attr->qp_type, init_attr->sq_sig_type,
				     qp);
		qp->ibqp.qp_num = qp->qpn;
		break;
	}
	case IB_QPT_SMI:
	case IB_QPT_GSI:
	{
		qp = kmalloc(sizeof (struct mthca_sqp), GFP_KERNEL);
		if (!qp)
			return ERR_PTR(-ENOMEM);

		qp->sq.max    = init_attr->cap.max_send_wr;
		qp->rq.max    = init_attr->cap.max_recv_wr;
		qp->sq.max_gs = init_attr->cap.max_send_sge;
		qp->rq.max_gs = init_attr->cap.max_recv_sge;

		qp->ibqp.qp_num = init_attr->qp_type == IB_QPT_SMI ? 0 : 1;

		err = mthca_alloc_sqp(to_mdev(pd->device), to_mpd(pd),
				      to_mcq(init_attr->send_cq),
				      to_mcq(init_attr->recv_cq),
				      init_attr->sq_sig_type,
				      qp->ibqp.qp_num, init_attr->port_num,
				      to_msqp(qp));
		break;
	}
	default:
		/* Don't support raw QPs */
		return ERR_PTR(-ENOSYS);
	}

	if (err) {
		kfree(qp);
		return ERR_PTR(err);
	}

	init_attr->cap.max_inline_data = 0;

	return &qp->ibqp;
}
static int mthca_destroy_qp(struct ib_qp *qp)
{
	mthca_free_qp(to_mdev(qp->device), to_mqp(qp));
	kfree(qp);
	return 0;
}
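/*
 * The hardware wants a power-of-two number of CQ entries; the empty
 * for loop below rounds the requested size up to the next power of 2
 * (note: strictly greater than "entries", so asking for exactly 2^n
 * allocates 2^(n+1) entries).
 */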
static struct ib_cq *mthca_create_cq(struct ib_device *ibdev, int entries,
				     struct ib_ucontext *context,
				     struct ib_udata *udata)
{
	struct mthca_cq *cq;
	int nent;
	int err;

	cq = kmalloc(sizeof *cq, GFP_KERNEL);
	if (!cq)
		return ERR_PTR(-ENOMEM);

	for (nent = 1; nent <= entries; nent <<= 1)
		; /* nothing */

	err = mthca_init_cq(to_mdev(ibdev), nent, cq);
	if (err) {
		kfree(cq);
		cq = ERR_PTR(err);
	}

	return &cq->ibcq;
}
static int mthca_destroy_cq(struct ib_cq *cq)
{
	mthca_free_cq(to_mdev(cq->device), to_mcq(cq));
	kfree(cq);

	return 0;
}
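/*
 * Translate IB access flags into MPT flags for the hardware.  Local
 * read access is always granted.  For example, IB_ACCESS_LOCAL_WRITE |
 * IB_ACCESS_REMOTE_READ maps to MTHCA_MPT_FLAG_LOCAL_WRITE |
 * MTHCA_MPT_FLAG_REMOTE_READ | MTHCA_MPT_FLAG_LOCAL_READ.
 */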
static inline u32 convert_access(int acc)
{
	return (acc & IB_ACCESS_REMOTE_ATOMIC ? MTHCA_MPT_FLAG_ATOMIC       : 0) |
	       (acc & IB_ACCESS_REMOTE_WRITE  ? MTHCA_MPT_FLAG_REMOTE_WRITE : 0) |
	       (acc & IB_ACCESS_REMOTE_READ   ? MTHCA_MPT_FLAG_REMOTE_READ  : 0) |
	       (acc & IB_ACCESS_LOCAL_WRITE   ? MTHCA_MPT_FLAG_LOCAL_WRITE  : 0) |
	       MTHCA_MPT_FLAG_LOCAL_READ;
}
static struct ib_mr *mthca_get_dma_mr(struct ib_pd *pd, int acc)
{
	struct mthca_mr *mr;
	int err;

	mr = kmalloc(sizeof *mr, GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	err = mthca_mr_alloc_notrans(to_mdev(pd->device),
				     to_mpd(pd)->pd_num,
				     convert_access(acc), mr);

	if (err) {
		kfree(mr);
		return ERR_PTR(err);
	}

	return &mr->ibmr;
}
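/*
 * Register a list of physical buffers.  The code below first
 * validates alignment, then searches for the largest page shift that
 * covers every buffer: with multiple buffers the shift is limited by
 * the lowest set bit in any later buffer's address, while a single
 * buffer just needs one page big enough to span it.
 */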
static struct ib_mr *mthca_reg_phys_mr(struct ib_pd *pd,
				       struct ib_phys_buf *buffer_list,
				       int num_phys_buf,
				       int acc,
				       u64 *iova_start)
{
	struct mthca_mr *mr;
	u64 *page_list;
	u64 total_size;
	u64 mask;
	int shift;
	int npages;
	int err;
	int i, j, n;

	/* First check that we have enough alignment */
	if ((*iova_start & ~PAGE_MASK) != (buffer_list[0].addr & ~PAGE_MASK))
		return ERR_PTR(-EINVAL);

	if (num_phys_buf > 1 &&
	    ((buffer_list[0].addr + buffer_list[0].size) & ~PAGE_MASK))
		return ERR_PTR(-EINVAL);

	mask = 0;
	total_size = 0;
	for (i = 0; i < num_phys_buf; ++i) {
		if (i != 0 && buffer_list[i].addr & ~PAGE_MASK)
			return ERR_PTR(-EINVAL);
		if (i != 0 && i != num_phys_buf - 1 &&
		    (buffer_list[i].size & ~PAGE_MASK))
			return ERR_PTR(-EINVAL);

		total_size += buffer_list[i].size;
		if (i > 0)
			mask |= buffer_list[i].addr;
	}

	/* Find largest page shift we can use to cover buffers */
	for (shift = PAGE_SHIFT; shift < 31; ++shift)
		if (num_phys_buf > 1) {
			if ((1ULL << shift) & mask)
				break;
		} else {
			if (1ULL << shift >=
			    buffer_list[0].size +
			    (buffer_list[0].addr & ((1ULL << shift) - 1)))
				break;
		}

	buffer_list[0].size += buffer_list[0].addr & ((1ULL << shift) - 1);
	buffer_list[0].addr &= ~0ull << shift;

	mr = kmalloc(sizeof *mr, GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	npages = 0;
	for (i = 0; i < num_phys_buf; ++i)
		npages += (buffer_list[i].size + (1ULL << shift) - 1) >> shift;

	if (!npages)
		return &mr->ibmr;

	page_list = kmalloc(npages * sizeof *page_list, GFP_KERNEL);
	if (!page_list) {
		kfree(mr);
		return ERR_PTR(-ENOMEM);
	}

	n = 0;
	for (i = 0; i < num_phys_buf; ++i)
		for (j = 0;
		     j < (buffer_list[i].size + (1ULL << shift) - 1) >> shift;
		     ++j)
			page_list[n++] = buffer_list[i].addr + ((u64) j << shift);

	mthca_dbg(to_mdev(pd->device), "Registering memory at %llx (iova %llx) "
		  "in PD %x; shift %d, npages %d.\n",
		  (unsigned long long) buffer_list[0].addr,
		  (unsigned long long) *iova_start,
		  to_mpd(pd)->pd_num,
		  shift, npages);

	err = mthca_mr_alloc_phys(to_mdev(pd->device),
				  to_mpd(pd)->pd_num,
				  page_list, shift, npages,
				  *iova_start, total_size,
				  convert_access(acc), mr);

	if (err) {
		kfree(page_list);
		kfree(mr);
		return ERR_PTR(err);
	}

	kfree(page_list);
	return &mr->ibmr;
}
static int mthca_dereg_mr(struct ib_mr *mr)
{
	struct mthca_mr *mmr = to_mmr(mr);
	mthca_free_mr(to_mdev(mr->device), mmr);
	kfree(mmr);
	return 0;
}
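/*
 * Fast memory regions (FMRs) let consumers remap a region without a
 * full deregister/reregister cycle; the requested attributes are
 * copied so that later map/unmap calls can consult them.
 */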
static struct ib_fmr *mthca_alloc_fmr(struct ib_pd *pd, int mr_access_flags,
				      struct ib_fmr_attr *fmr_attr)
{
	struct mthca_fmr *fmr;
	int err;

	fmr = kmalloc(sizeof *fmr, GFP_KERNEL);
	if (!fmr)
		return ERR_PTR(-ENOMEM);

	memcpy(&fmr->attr, fmr_attr, sizeof *fmr_attr);
	err = mthca_fmr_alloc(to_mdev(pd->device), to_mpd(pd)->pd_num,
			      convert_access(mr_access_flags), fmr);

	if (err) {
		kfree(fmr);
		return ERR_PTR(err);
	}

	return &fmr->ibmr;
}
static int mthca_dealloc_fmr(struct ib_fmr *fmr)
{
	struct mthca_fmr *mfmr = to_mfmr(fmr);
	int err;

	err = mthca_free_fmr(to_mdev(fmr->device), mfmr);
	if (err)
		return err;

	kfree(mfmr);
	return 0;
}
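/*
 * Unmap a list of FMRs, which must all belong to the same device.
 * On mem-free (Arbel) hardware the MPT entries are cleared in
 * software (hence the wmb() before telling the hardware), while
 * Tavor uses a per-FMR unmap command; both paths finish with a
 * SYNC_TPT command to synchronize the translation tables.
 */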
static int mthca_unmap_fmr(struct list_head *fmr_list)
{
	struct ib_fmr *fmr;
	int err;
	u8 status;
	struct mthca_dev *mdev = NULL;

	list_for_each_entry(fmr, fmr_list, list) {
		if (mdev && to_mdev(fmr->device) != mdev)
			return -EINVAL;
		mdev = to_mdev(fmr->device);
	}

	if (!mdev)
		return 0;

	if (mthca_is_memfree(mdev)) {
		list_for_each_entry(fmr, fmr_list, list)
			mthca_arbel_fmr_unmap(mdev, to_mfmr(fmr));

		wmb();
	} else
		list_for_each_entry(fmr, fmr_list, list)
			mthca_tavor_fmr_unmap(mdev, to_mfmr(fmr));

	err = mthca_SYNC_TPT(mdev, &status);
	if (err)
		return err;
	if (status)
		return -EINVAL;
	return 0;
}
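/*
 * sysfs attributes exposed under the IB class device: hardware
 * revision, firmware version (major.minor.subminor) and HCA type
 * decoded from the PCI device ID.
 */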
static ssize_t show_rev(struct class_device *cdev, char *buf)
{
	struct mthca_dev *dev = container_of(cdev, struct mthca_dev, ib_dev.class_dev);
	return sprintf(buf, "%x\n", dev->rev_id);
}

static ssize_t show_fw_ver(struct class_device *cdev, char *buf)
{
	struct mthca_dev *dev = container_of(cdev, struct mthca_dev, ib_dev.class_dev);
	return sprintf(buf, "%x.%x.%x\n", (int) (dev->fw_ver >> 32),
		       (int) (dev->fw_ver >> 16) & 0xffff,
		       (int) dev->fw_ver & 0xffff);
}

static ssize_t show_hca(struct class_device *cdev, char *buf)
{
	struct mthca_dev *dev = container_of(cdev, struct mthca_dev, ib_dev.class_dev);
	switch (dev->pdev->device) {
	case PCI_DEVICE_ID_MELLANOX_TAVOR:
		return sprintf(buf, "MT23108\n");
	case PCI_DEVICE_ID_MELLANOX_ARBEL_COMPAT:
		return sprintf(buf, "MT25208 (MT23108 compat mode)\n");
	case PCI_DEVICE_ID_MELLANOX_ARBEL:
		return sprintf(buf, "MT25208\n");
	case PCI_DEVICE_ID_MELLANOX_SINAI:
	case PCI_DEVICE_ID_MELLANOX_SINAI_OLD:
		return sprintf(buf, "MT25204\n");
	default:
		return sprintf(buf, "unknown\n");
	}
}

static CLASS_DEVICE_ATTR(hw_rev,   S_IRUGO, show_rev,    NULL);
static CLASS_DEVICE_ATTR(fw_ver,   S_IRUGO, show_fw_ver, NULL);
static CLASS_DEVICE_ATTR(hca_type, S_IRUGO, show_hca,    NULL);

static struct class_device_attribute *mthca_class_attributes[] = {
	&class_device_attr_hw_rev,
	&class_device_attr_fw_ver,
	&class_device_attr_hca_type
};
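/*
 * Fill in the ib_device method table and register with the IB core.
 * FMR support is only wired up when the device advertises it, and
 * the fast-path entry points (CQ arming, post_send/post_recv) are
 * chosen based on whether this is a mem-free (Arbel) or Tavor-mode
 * HCA.
 */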
int mthca_register_device(struct mthca_dev *dev)
{
	int ret;
	int i;

	strlcpy(dev->ib_dev.name, "mthca%d", IB_DEVICE_NAME_MAX);
	dev->ib_dev.owner         = THIS_MODULE;

	dev->ib_dev.node_type     = IB_NODE_CA;
	dev->ib_dev.phys_port_cnt = dev->limits.num_ports;
	dev->ib_dev.dma_device    = &dev->pdev->dev;
	dev->ib_dev.class_dev.dev = &dev->pdev->dev;
	dev->ib_dev.query_device  = mthca_query_device;
	dev->ib_dev.query_port    = mthca_query_port;
	dev->ib_dev.modify_port   = mthca_modify_port;
	dev->ib_dev.query_pkey    = mthca_query_pkey;
	dev->ib_dev.query_gid     = mthca_query_gid;
	dev->ib_dev.alloc_pd      = mthca_alloc_pd;
	dev->ib_dev.dealloc_pd    = mthca_dealloc_pd;
	dev->ib_dev.create_ah     = mthca_ah_create;
	dev->ib_dev.destroy_ah    = mthca_ah_destroy;
	dev->ib_dev.create_qp     = mthca_create_qp;
	dev->ib_dev.modify_qp     = mthca_modify_qp;
	dev->ib_dev.destroy_qp    = mthca_destroy_qp;
	dev->ib_dev.create_cq     = mthca_create_cq;
	dev->ib_dev.destroy_cq    = mthca_destroy_cq;
	dev->ib_dev.poll_cq       = mthca_poll_cq;
	dev->ib_dev.get_dma_mr    = mthca_get_dma_mr;
	dev->ib_dev.reg_phys_mr   = mthca_reg_phys_mr;
	dev->ib_dev.dereg_mr      = mthca_dereg_mr;

	if (dev->mthca_flags & MTHCA_FLAG_FMR) {
		dev->ib_dev.alloc_fmr   = mthca_alloc_fmr;
		dev->ib_dev.unmap_fmr   = mthca_unmap_fmr;
		dev->ib_dev.dealloc_fmr = mthca_dealloc_fmr;
		if (mthca_is_memfree(dev))
			dev->ib_dev.map_phys_fmr = mthca_arbel_map_phys_fmr;
		else
			dev->ib_dev.map_phys_fmr = mthca_tavor_map_phys_fmr;
	}

	dev->ib_dev.attach_mcast = mthca_multicast_attach;
	dev->ib_dev.detach_mcast = mthca_multicast_detach;
	dev->ib_dev.process_mad  = mthca_process_mad;

	if (mthca_is_memfree(dev)) {
		dev->ib_dev.req_notify_cq = mthca_arbel_arm_cq;
		dev->ib_dev.post_send     = mthca_arbel_post_send;
		dev->ib_dev.post_recv     = mthca_arbel_post_receive;
	} else {
		dev->ib_dev.req_notify_cq = mthca_tavor_arm_cq;
		dev->ib_dev.post_send     = mthca_tavor_post_send;
		dev->ib_dev.post_recv     = mthca_tavor_post_receive;
	}

	init_MUTEX(&dev->cap_mask_mutex);

	ret = ib_register_device(&dev->ib_dev);
	if (ret)
		return ret;

	for (i = 0; i < ARRAY_SIZE(mthca_class_attributes); ++i) {
		ret = class_device_create_file(&dev->ib_dev.class_dev,
					       mthca_class_attributes[i]);
		if (ret) {
			ib_unregister_device(&dev->ib_dev);
			return ret;
		}
	}

	return 0;
}
void mthca_unregister_device(struct mthca_dev *dev)
{
	ib_unregister_device(&dev->ib_dev);
}