/*
 * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>

#include <rdma/ib_smi.h>
#include <rdma/ib_user_verbs.h>

#include <linux/mlx4/driver.h>
#include <linux/mlx4/cmd.h>

#include "mlx4_ib.h"
#include "user.h"
#define DRV_NAME	"mlx4_ib"
#define DRV_VERSION	"1.0"
#define DRV_RELDATE	"April 4, 2008"
MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("Mellanox ConnectX HCA InfiniBand driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
static const char mlx4_ib_version[] =
	DRV_NAME ": Mellanox ConnectX InfiniBand driver v"
	DRV_VERSION " (" DRV_RELDATE ")\n";
/*
 * Fill in the common header of a subnet management query: a LID-routed
 * SubnGet().  Callers set attr_id/attr_mod afterwards and hand the MAD
 * to mlx4_MAD_IFC().
 */
static void init_query_mad(struct ib_smp *mad)
{
	mad->base_version  = 1;
	mad->mgmt_class    = IB_MGMT_CLASS_SUBN_LID_ROUTED;
	mad->class_version = 1;
	mad->method	   = IB_MGMT_METHOD_GET;
}
static int mlx4_ib_query_device(struct ib_device *ibdev,
				struct ib_device_attr *props)
{
	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	struct ib_smp *in_mad  = NULL;
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;

	in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;

	err = mlx4_MAD_IFC(to_mdev(ibdev), 1, 1, 1, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;
	memset(props, 0, sizeof *props);

	props->fw_ver = dev->dev->caps.fw_ver;
	props->device_cap_flags    = IB_DEVICE_CHANGE_PHY_PORT |
		IB_DEVICE_PORT_ACTIVE_EVENT		|
		IB_DEVICE_SYS_IMAGE_GUID		|
		IB_DEVICE_RC_RNR_NAK_GEN		|
		IB_DEVICE_BLOCK_MULTICAST_LOOPBACK;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BAD_PKEY_CNTR)
		props->device_cap_flags |= IB_DEVICE_BAD_PKEY_CNTR;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BAD_QKEY_CNTR)
		props->device_cap_flags |= IB_DEVICE_BAD_QKEY_CNTR;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_APM)
		props->device_cap_flags |= IB_DEVICE_AUTO_PATH_MIG;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_UD_AV_PORT)
		props->device_cap_flags |= IB_DEVICE_UD_AV_PORT_ENFORCE;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_IPOIB_CSUM)
		props->device_cap_flags |= IB_DEVICE_UD_IP_CSUM;
	if (dev->dev->caps.max_gso_sz)
		props->device_cap_flags |= IB_DEVICE_UD_TSO;
	if (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_RESERVED_LKEY)
		props->device_cap_flags |= IB_DEVICE_LOCAL_DMA_LKEY;
	if ((dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_LOCAL_INV) &&
	    (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_REMOTE_INV) &&
	    (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_FAST_REG_WR))
		props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
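	/*
	 * The identity attributes come straight out of the NodeInfo
	 * response, parsed at fixed byte offsets: the 24-bit vendor
	 * OUI, part ID, HW revision and system image GUID.
	 */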
	props->vendor_id	   = be32_to_cpup((__be32 *) (out_mad->data + 36)) &
		0xffffff;
	props->vendor_part_id	   = be16_to_cpup((__be16 *) (out_mad->data + 30));
	props->hw_ver		   = be32_to_cpup((__be32 *) (out_mad->data + 32));
	memcpy(&props->sys_image_guid, out_mad->data + 4, 8);
	props->max_mr_size	   = ~0ull;
	props->page_size_cap	   = dev->dev->caps.page_size_cap;
	props->max_qp		   = dev->dev->caps.num_qps - dev->dev->caps.reserved_qps;
	props->max_qp_wr	   = dev->dev->caps.max_wqes;
	props->max_sge		   = min(dev->dev->caps.max_sq_sg,
					 dev->dev->caps.max_rq_sg);
	props->max_cq		   = dev->dev->caps.num_cqs - dev->dev->caps.reserved_cqs;
	props->max_cqe		   = dev->dev->caps.max_cqes;
	props->max_mr		   = dev->dev->caps.num_mpts - dev->dev->caps.reserved_mrws;
	props->max_pd		   = dev->dev->caps.num_pds - dev->dev->caps.reserved_pds;
	props->max_qp_rd_atom	   = dev->dev->caps.max_qp_dest_rdma;
	props->max_qp_init_rd_atom = dev->dev->caps.max_qp_init_rdma;
	props->max_res_rd_atom	   = props->max_qp_rd_atom * props->max_qp;
	props->max_srq		   = dev->dev->caps.num_srqs - dev->dev->caps.reserved_srqs;
	props->max_srq_wr	   = dev->dev->caps.max_srq_wqes - 1;
	props->max_srq_sge	   = dev->dev->caps.max_srq_sge;
	props->max_fast_reg_page_list_len = PAGE_SIZE / sizeof (u64);
	props->local_ca_ack_delay  = dev->dev->caps.local_ca_ack_delay;
	props->atomic_cap	   = dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_ATOMIC ?
		IB_ATOMIC_HCA : IB_ATOMIC_NONE;
	props->max_pkeys	   = dev->dev->caps.pkey_table_len[1];
	props->max_mcast_grp	   = dev->dev->caps.num_mgms + dev->dev->caps.num_amgms;
	props->max_mcast_qp_attach = dev->dev->caps.num_qp_per_mgm;
	props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
					   props->max_mcast_grp;
	props->max_map_per_fmr = (1 << (32 - ilog2(dev->dev->caps.num_mpts))) - 1;

out:
	kfree(in_mad);
	kfree(out_mad);

	return err;
}
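/*
 * Read the PortInfo attribute for @port and translate the fixed byte
 * offsets of the response into struct ib_port_attr fields; table
 * lengths and the max message size come from the cached device caps.
 */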
static int mlx4_ib_query_port(struct ib_device *ibdev, u8 port,
			      struct ib_port_attr *props)
{
	struct ib_smp *in_mad  = NULL;
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;

	in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	memset(props, 0, sizeof *props);

	init_query_mad(in_mad);
	in_mad->attr_id  = IB_SMP_ATTR_PORT_INFO;
	in_mad->attr_mod = cpu_to_be32(port);

	err = mlx4_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	props->lid		= be16_to_cpup((__be16 *) (out_mad->data + 16));
	props->lmc		= out_mad->data[34] & 0x7;
	props->sm_lid		= be16_to_cpup((__be16 *) (out_mad->data + 18));
	props->sm_sl		= out_mad->data[36] & 0xf;
	props->state		= out_mad->data[32] & 0xf;
	props->phys_state	= out_mad->data[33] >> 4;
	props->port_cap_flags	= be32_to_cpup((__be32 *) (out_mad->data + 20));
	props->gid_tbl_len	= to_mdev(ibdev)->dev->caps.gid_table_len[port];
	props->max_msg_sz	= to_mdev(ibdev)->dev->caps.max_msg_sz;
	props->pkey_tbl_len	= to_mdev(ibdev)->dev->caps.pkey_table_len[port];
	props->bad_pkey_cntr	= be16_to_cpup((__be16 *) (out_mad->data + 46));
	props->qkey_viol_cntr	= be16_to_cpup((__be16 *) (out_mad->data + 48));
	props->active_width	= out_mad->data[31] & 0xf;
	props->active_speed	= out_mad->data[35] >> 4;
	props->max_mtu		= out_mad->data[41] & 0xf;
	props->active_mtu	= out_mad->data[36] >> 4;
	props->subnet_timeout	= out_mad->data[51] & 0x1f;
	props->max_vl_num	= out_mad->data[37] >> 4;
	props->init_type_reply	= out_mad->data[41] >> 4;

out:
	kfree(in_mad);
	kfree(out_mad);

	return err;
}
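/*
 * A port GID is assembled from two queries: the upper 8 bytes are the
 * subnet prefix from PortInfo, and the lower 8 bytes are one GUID out
 * of a GuidInfo block of eight (attr_mod selects the block, index % 8
 * the entry within it).
 */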
static int mlx4_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
			     union ib_gid *gid)
{
	struct ib_smp *in_mad  = NULL;
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;

	in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id  = IB_SMP_ATTR_PORT_INFO;
	in_mad->attr_mod = cpu_to_be32(port);

	err = mlx4_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	memcpy(gid->raw, out_mad->data + 8, 8);

	init_query_mad(in_mad);
	in_mad->attr_id  = IB_SMP_ATTR_GUID_INFO;
	in_mad->attr_mod = cpu_to_be32(index / 8);

	err = mlx4_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	memcpy(gid->raw + 8, out_mad->data + (index % 8) * 8, 8);

out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}
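/*
 * P_Key table entries are fetched in blocks of 32: attr_mod picks the
 * block (index / 32) and the big-endian entry at index % 32 is
 * converted and returned.
 */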
static int mlx4_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
			      u16 *pkey)
{
	struct ib_smp *in_mad  = NULL;
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;

	in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id  = IB_SMP_ATTR_PKEY_TABLE;
	in_mad->attr_mod = cpu_to_be32(index / 32);

	err = mlx4_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	*pkey = be16_to_cpu(((__be16 *) out_mad->data)[index % 32]);

out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}
static int mlx4_ib_modify_device(struct ib_device *ibdev, int mask,
				 struct ib_device_modify *props)
{
	if (mask & ~IB_DEVICE_MODIFY_NODE_DESC)
		return -EOPNOTSUPP;

	if (mask & IB_DEVICE_MODIFY_NODE_DESC) {
		spin_lock(&to_mdev(ibdev)->sm_lock);
		memcpy(ibdev->node_desc, props->node_desc, 64);
		spin_unlock(&to_mdev(ibdev)->sm_lock);
	}

	return 0;
}
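/*
 * Write the port capability mask (and optionally reset the QKey
 * violation counter) with the SET_PORT firmware command.  Older
 * firmware uses a different mailbox layout, selected at runtime by
 * MLX4_FLAG_OLD_PORT_CMDS.
 */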
static int mlx4_SET_PORT(struct mlx4_ib_dev *dev, u8 port, int reset_qkey_viols,
			 u32 cap_mask)
{
	struct mlx4_cmd_mailbox *mailbox;
	int err;

	mailbox = mlx4_alloc_cmd_mailbox(dev->dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	memset(mailbox->buf, 0, 256);

	if (dev->dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
		*(u8 *) mailbox->buf	     = !!reset_qkey_viols << 6;
		((__be32 *) mailbox->buf)[2] = cpu_to_be32(cap_mask);
	} else {
		((u8 *) mailbox->buf)[3]     = !!reset_qkey_viols;
		((__be32 *) mailbox->buf)[1] = cpu_to_be32(cap_mask);
	}

	err = mlx4_cmd(dev->dev, mailbox->dma, port, 0, MLX4_CMD_SET_PORT,
		       MLX4_CMD_TIME_CLASS_B);

	mlx4_free_cmd_mailbox(dev->dev, mailbox);
	return err;
}
static int mlx4_ib_modify_port(struct ib_device *ibdev, u8 port, int mask,
			       struct ib_port_modify *props)
{
	struct ib_port_attr attr;
	u32 cap_mask;
	int err;

	mutex_lock(&to_mdev(ibdev)->cap_mask_mutex);

	err = mlx4_ib_query_port(ibdev, port, &attr);
	if (err)
		goto out;

	cap_mask = (attr.port_cap_flags | props->set_port_cap_mask) &
		~props->clr_port_cap_mask;

	err = mlx4_SET_PORT(to_mdev(ibdev), port,
			    !!(mask & IB_PORT_RESET_QKEY_CNTR),
			    cap_mask);

out:
	mutex_unlock(&to_mdev(ibdev)->cap_mask_mutex);
	return err;
}
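/*
 * Allocate a per-process user context: one UAR (doorbell page) plus
 * the bookkeeping for userspace doorbell records.  The QP table size
 * and BlueFlame geometry are reported back to userspace through udata.
 */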
static struct ib_ucontext *mlx4_ib_alloc_ucontext(struct ib_device *ibdev,
						  struct ib_udata *udata)
{
	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	struct mlx4_ib_ucontext *context;
	struct mlx4_ib_alloc_ucontext_resp resp;
	int err;

	resp.qp_tab_size      = dev->dev->caps.num_qps;
	resp.bf_reg_size      = dev->dev->caps.bf_reg_size;
	resp.bf_regs_per_page = dev->dev->caps.bf_regs_per_page;

	context = kmalloc(sizeof *context, GFP_KERNEL);
	if (!context)
		return ERR_PTR(-ENOMEM);

	err = mlx4_uar_alloc(to_mdev(ibdev)->dev, &context->uar);
	if (err) {
		kfree(context);
		return ERR_PTR(err);
	}

	INIT_LIST_HEAD(&context->db_page_list);
	mutex_init(&context->db_page_mutex);

	err = ib_copy_to_udata(udata, &resp, sizeof resp);
	if (err) {
		mlx4_uar_free(to_mdev(ibdev)->dev, &context->uar);
		kfree(context);
		return ERR_PTR(-EFAULT);
	}

	return &context->ibucontext;
}
static int mlx4_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
{
	struct mlx4_ib_ucontext *context = to_mucontext(ibcontext);

	mlx4_uar_free(to_mdev(ibcontext->device)->dev, &context->uar);
	kfree(context);

	return 0;
}
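/*
 * Userspace maps exactly one page at a time: page offset 0 is the
 * context's UAR doorbell page, page offset 1 the matching BlueFlame
 * page, which lives num_uars pages beyond the UAR in the BAR.
 */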
static int mlx4_ib_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
{
	struct mlx4_ib_dev *dev = to_mdev(context->device);

	if (vma->vm_end - vma->vm_start != PAGE_SIZE)
		return -EINVAL;

	if (vma->vm_pgoff == 0) {
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

		if (io_remap_pfn_range(vma, vma->vm_start,
				       to_mucontext(context)->uar.pfn,
				       PAGE_SIZE, vma->vm_page_prot))
			return -EAGAIN;
	} else if (vma->vm_pgoff == 1 && dev->dev->caps.bf_reg_size != 0) {
		/* FIXME want pgprot_writecombine() for BlueFlame pages */
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

		if (io_remap_pfn_range(vma, vma->vm_start,
				       to_mucontext(context)->uar.pfn +
				       dev->dev->caps.num_uars,
				       PAGE_SIZE, vma->vm_page_prot))
			return -EAGAIN;
	} else
		return -EINVAL;

	return 0;
}
static struct ib_pd *mlx4_ib_alloc_pd(struct ib_device *ibdev,
				      struct ib_ucontext *context,
				      struct ib_udata *udata)
{
	struct mlx4_ib_pd *pd;
	int err;

	pd = kmalloc(sizeof *pd, GFP_KERNEL);
	if (!pd)
		return ERR_PTR(-ENOMEM);

	err = mlx4_pd_alloc(to_mdev(ibdev)->dev, &pd->pdn);
	if (err) {
		kfree(pd);
		return ERR_PTR(err);
	}

	if (context)
		if (ib_copy_to_udata(udata, &pd->pdn, sizeof (__u32))) {
			mlx4_pd_free(to_mdev(ibdev)->dev, pd->pdn);
			kfree(pd);
			return ERR_PTR(-EFAULT);
		}

	return &pd->ibpd;
}
static int mlx4_ib_dealloc_pd(struct ib_pd *pd)
{
	mlx4_pd_free(to_mdev(pd->device)->dev, to_mpd(pd)->pdn);
	kfree(pd);

	return 0;
}
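/*
 * Multicast attach/detach just forwards to the mlx4 core, passing
 * along whether this QP was created with multicast loopback blocked.
 */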
static int mlx4_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
	return mlx4_multicast_attach(to_mdev(ibqp->device)->dev,
				     &to_mqp(ibqp)->mqp, gid->raw,
				     !!(to_mqp(ibqp)->flags &
					MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK));
}

static int mlx4_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
	return mlx4_multicast_detach(to_mdev(ibqp->device)->dev,
				     &to_mqp(ibqp)->mqp, gid->raw);
}
static int init_node_data(struct mlx4_ib_dev *dev)
{
	struct ib_smp *in_mad  = NULL;
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;

	in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_NODE_DESC;

	err = mlx4_MAD_IFC(dev, 1, 1, 1, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	memcpy(dev->ib_dev.node_desc, out_mad->data, 64);

	in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;

	err = mlx4_MAD_IFC(dev, 1, 1, 1, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	dev->dev->rev_id = be32_to_cpup((__be32 *) (out_mad->data + 32));
	memcpy(&dev->ib_dev.node_guid, out_mad->data + 12, 8);

out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}
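/*
 * sysfs attributes exposing the HCA type, firmware version, hardware
 * revision and board ID under the IB device's class directory.
 */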
static ssize_t show_hca(struct device *device, struct device_attribute *attr,
			char *buf)
{
	struct mlx4_ib_dev *dev =
		container_of(device, struct mlx4_ib_dev, ib_dev.dev);
	return sprintf(buf, "MT%d\n", dev->dev->pdev->device);
}

static ssize_t show_fw_ver(struct device *device, struct device_attribute *attr,
			   char *buf)
{
	struct mlx4_ib_dev *dev =
		container_of(device, struct mlx4_ib_dev, ib_dev.dev);
	return sprintf(buf, "%d.%d.%d\n", (int) (dev->dev->caps.fw_ver >> 32),
		       (int) (dev->dev->caps.fw_ver >> 16) & 0xffff,
		       (int) dev->dev->caps.fw_ver & 0xffff);
}

static ssize_t show_rev(struct device *device, struct device_attribute *attr,
			char *buf)
{
	struct mlx4_ib_dev *dev =
		container_of(device, struct mlx4_ib_dev, ib_dev.dev);
	return sprintf(buf, "%x\n", dev->dev->rev_id);
}

static ssize_t show_board(struct device *device, struct device_attribute *attr,
			  char *buf)
{
	struct mlx4_ib_dev *dev =
		container_of(device, struct mlx4_ib_dev, ib_dev.dev);
	return sprintf(buf, "%.*s\n", MLX4_BOARD_ID_LEN,
		       dev->dev->board_id);
}
static DEVICE_ATTR(hw_rev,   S_IRUGO, show_rev,    NULL);
static DEVICE_ATTR(fw_ver,   S_IRUGO, show_fw_ver, NULL);
static DEVICE_ATTR(hca_type, S_IRUGO, show_hca,    NULL);
static DEVICE_ATTR(board_id, S_IRUGO, show_board,  NULL);
static struct device_attribute *mlx4_class_attributes[] = {
	&dev_attr_hw_rev,
	&dev_attr_fw_ver,
	&dev_attr_hca_type,
	&dev_attr_board_id
};
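/*
 * Called by the mlx4 core once per ConnectX device: allocate the IB
 * device structure, reserve a PD and UAR for the driver's own use,
 * fill in the verbs entry points and register with the RDMA core.
 */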
static void *mlx4_ib_add(struct mlx4_dev *dev)
{
	static int mlx4_ib_version_printed;
	struct mlx4_ib_dev *ibdev;
	int i;

	if (!mlx4_ib_version_printed) {
		printk(KERN_INFO "%s", mlx4_ib_version);
		++mlx4_ib_version_printed;
	}

	ibdev = (struct mlx4_ib_dev *) ib_alloc_device(sizeof *ibdev);
	if (!ibdev) {
		dev_err(&dev->pdev->dev, "Device struct alloc failed\n");
		return NULL;
	}

	if (mlx4_pd_alloc(dev, &ibdev->priv_pdn))
		goto err_dealloc;

	if (mlx4_uar_alloc(dev, &ibdev->priv_uar))
		goto err_pd;

	ibdev->uar_map = ioremap(ibdev->priv_uar.pfn << PAGE_SHIFT, PAGE_SIZE);
	if (!ibdev->uar_map)
		goto err_uar;
	MLX4_INIT_DOORBELL_LOCK(&ibdev->uar_lock);

	ibdev->dev = dev;
	strlcpy(ibdev->ib_dev.name, "mlx4_%d", IB_DEVICE_NAME_MAX);
	ibdev->ib_dev.owner		= THIS_MODULE;
	ibdev->ib_dev.node_type		= RDMA_NODE_IB_CA;
	ibdev->ib_dev.local_dma_lkey	= dev->caps.reserved_lkey;
	ibdev->ib_dev.phys_port_cnt	= dev->caps.num_ports;
	ibdev->ib_dev.num_comp_vectors	= 1;
	ibdev->ib_dev.dma_device	= &dev->pdev->dev;
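	/*
	 * Advertise which uverbs commands this driver implements; the
	 * uverbs core rejects commands not set in this mask.
	 */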
	ibdev->ib_dev.uverbs_abi_ver	= MLX4_IB_UVERBS_ABI_VERSION;
	ibdev->ib_dev.uverbs_cmd_mask	=
		(1ull << IB_USER_VERBS_CMD_GET_CONTEXT)		|
		(1ull << IB_USER_VERBS_CMD_QUERY_DEVICE)	|
		(1ull << IB_USER_VERBS_CMD_QUERY_PORT)		|
		(1ull << IB_USER_VERBS_CMD_ALLOC_PD)		|
		(1ull << IB_USER_VERBS_CMD_DEALLOC_PD)		|
		(1ull << IB_USER_VERBS_CMD_REG_MR)		|
		(1ull << IB_USER_VERBS_CMD_DEREG_MR)		|
		(1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL)	|
		(1ull << IB_USER_VERBS_CMD_CREATE_CQ)		|
		(1ull << IB_USER_VERBS_CMD_RESIZE_CQ)		|
		(1ull << IB_USER_VERBS_CMD_DESTROY_CQ)		|
		(1ull << IB_USER_VERBS_CMD_CREATE_QP)		|
		(1ull << IB_USER_VERBS_CMD_MODIFY_QP)		|
		(1ull << IB_USER_VERBS_CMD_QUERY_QP)		|
		(1ull << IB_USER_VERBS_CMD_DESTROY_QP)		|
		(1ull << IB_USER_VERBS_CMD_ATTACH_MCAST)	|
		(1ull << IB_USER_VERBS_CMD_DETACH_MCAST)	|
		(1ull << IB_USER_VERBS_CMD_CREATE_SRQ)		|
		(1ull << IB_USER_VERBS_CMD_MODIFY_SRQ)		|
		(1ull << IB_USER_VERBS_CMD_QUERY_SRQ)		|
		(1ull << IB_USER_VERBS_CMD_DESTROY_SRQ);
	ibdev->ib_dev.query_device	= mlx4_ib_query_device;
	ibdev->ib_dev.query_port	= mlx4_ib_query_port;
	ibdev->ib_dev.query_gid		= mlx4_ib_query_gid;
	ibdev->ib_dev.query_pkey	= mlx4_ib_query_pkey;
	ibdev->ib_dev.modify_device	= mlx4_ib_modify_device;
	ibdev->ib_dev.modify_port	= mlx4_ib_modify_port;
	ibdev->ib_dev.alloc_ucontext	= mlx4_ib_alloc_ucontext;
	ibdev->ib_dev.dealloc_ucontext	= mlx4_ib_dealloc_ucontext;
	ibdev->ib_dev.mmap		= mlx4_ib_mmap;
	ibdev->ib_dev.alloc_pd		= mlx4_ib_alloc_pd;
	ibdev->ib_dev.dealloc_pd	= mlx4_ib_dealloc_pd;
	ibdev->ib_dev.create_ah		= mlx4_ib_create_ah;
	ibdev->ib_dev.query_ah		= mlx4_ib_query_ah;
	ibdev->ib_dev.destroy_ah	= mlx4_ib_destroy_ah;
	ibdev->ib_dev.create_srq	= mlx4_ib_create_srq;
	ibdev->ib_dev.modify_srq	= mlx4_ib_modify_srq;
	ibdev->ib_dev.query_srq		= mlx4_ib_query_srq;
	ibdev->ib_dev.destroy_srq	= mlx4_ib_destroy_srq;
	ibdev->ib_dev.post_srq_recv	= mlx4_ib_post_srq_recv;
	ibdev->ib_dev.create_qp		= mlx4_ib_create_qp;
	ibdev->ib_dev.modify_qp		= mlx4_ib_modify_qp;
	ibdev->ib_dev.query_qp		= mlx4_ib_query_qp;
	ibdev->ib_dev.destroy_qp	= mlx4_ib_destroy_qp;
	ibdev->ib_dev.post_send		= mlx4_ib_post_send;
	ibdev->ib_dev.post_recv		= mlx4_ib_post_recv;
	ibdev->ib_dev.create_cq		= mlx4_ib_create_cq;
	ibdev->ib_dev.modify_cq		= mlx4_ib_modify_cq;
	ibdev->ib_dev.resize_cq		= mlx4_ib_resize_cq;
	ibdev->ib_dev.destroy_cq	= mlx4_ib_destroy_cq;
	ibdev->ib_dev.poll_cq		= mlx4_ib_poll_cq;
	ibdev->ib_dev.req_notify_cq	= mlx4_ib_arm_cq;
	ibdev->ib_dev.get_dma_mr	= mlx4_ib_get_dma_mr;
	ibdev->ib_dev.reg_user_mr	= mlx4_ib_reg_user_mr;
	ibdev->ib_dev.dereg_mr		= mlx4_ib_dereg_mr;
	ibdev->ib_dev.alloc_fast_reg_mr = mlx4_ib_alloc_fast_reg_mr;
	ibdev->ib_dev.alloc_fast_reg_page_list = mlx4_ib_alloc_fast_reg_page_list;
	ibdev->ib_dev.free_fast_reg_page_list  = mlx4_ib_free_fast_reg_page_list;
	ibdev->ib_dev.attach_mcast	= mlx4_ib_mcg_attach;
	ibdev->ib_dev.detach_mcast	= mlx4_ib_mcg_detach;
	ibdev->ib_dev.process_mad	= mlx4_ib_process_mad;
	if (dev->caps.flags & MLX4_DEV_CAP_FLAG_FMR) {
		ibdev->ib_dev.alloc_fmr		= mlx4_ib_fmr_alloc;
		ibdev->ib_dev.map_phys_fmr	= mlx4_ib_map_phys_fmr;
		ibdev->ib_dev.unmap_fmr		= mlx4_ib_unmap_fmr;
		ibdev->ib_dev.dealloc_fmr	= mlx4_ib_fmr_dealloc;
	}
	if (init_node_data(ibdev))
		goto err_map;

	spin_lock_init(&ibdev->sm_lock);
	mutex_init(&ibdev->cap_mask_mutex);

	if (ib_register_device(&ibdev->ib_dev))
		goto err_map;

	if (mlx4_ib_mad_init(ibdev))
		goto err_reg;

	for (i = 0; i < ARRAY_SIZE(mlx4_class_attributes); ++i) {
		if (device_create_file(&ibdev->ib_dev.dev,
				       mlx4_class_attributes[i]))
			goto err_reg;
	}

	return ibdev;

err_reg:
	ib_unregister_device(&ibdev->ib_dev);

err_map:
	iounmap(ibdev->uar_map);

err_uar:
	mlx4_uar_free(dev, &ibdev->priv_uar);

err_pd:
	mlx4_pd_free(dev, ibdev->priv_pdn);

err_dealloc:
	ib_dealloc_device(&ibdev->ib_dev);

	return NULL;
}
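/*
 * Tear down in the reverse order of mlx4_ib_add(), after closing all
 * ports through the firmware.
 */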
static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr)
{
	struct mlx4_ib_dev *ibdev = ibdev_ptr;
	int p;

	for (p = 1; p <= dev->caps.num_ports; ++p)
		mlx4_CLOSE_PORT(dev, p);

	mlx4_ib_mad_cleanup(ibdev);
	ib_unregister_device(&ibdev->ib_dev);
	iounmap(ibdev->uar_map);
	mlx4_uar_free(dev, &ibdev->priv_uar);
	mlx4_pd_free(dev, ibdev->priv_pdn);
	ib_dealloc_device(&ibdev->ib_dev);
}
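/*
 * Translate low-level mlx4 core events into the corresponding IB
 * events and dispatch them to registered consumers.
 */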
static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr,
			  enum mlx4_dev_event event, int port)
{
	struct ib_event ibev;

	switch (event) {
	case MLX4_DEV_EVENT_PORT_UP:
		ibev.event = IB_EVENT_PORT_ACTIVE;
		break;

	case MLX4_DEV_EVENT_PORT_DOWN:
		ibev.event = IB_EVENT_PORT_ERR;
		break;

	case MLX4_DEV_EVENT_CATASTROPHIC_ERROR:
		ibev.event = IB_EVENT_DEVICE_FATAL;
		break;

	default:
		return;
	}

	ibev.device	      = ibdev_ptr;
	ibev.element.port_num = port;

	ib_dispatch_event(&ibev);
}
static struct mlx4_interface mlx4_ib_interface = {
	.add	= mlx4_ib_add,
	.remove	= mlx4_ib_remove,
	.event	= mlx4_ib_event
};
static int __init mlx4_ib_init(void)
{
	return mlx4_register_interface(&mlx4_ib_interface);
}

static void __exit mlx4_ib_cleanup(void)
{
	mlx4_unregister_interface(&mlx4_ib_interface);
}

module_init(mlx4_ib_init);
module_exit(mlx4_ib_cleanup);