/*
 * IBM eServer eHCA Infiniband device driver for Linux on POWER
 *
 * module start stop, hca detection
 *
 * Authors: Heiko J Schick <schickhj@de.ibm.com>
 *          Hoang-Nam Nguyen <hnguyen@de.ibm.com>
 *          Joachim Fenkes <fenkes@de.ibm.com>
 *
 * Copyright (c) 2005 IBM Corporation
 *
 * All rights reserved.
 *
 * This source code is distributed under a dual license of GPL v2.0 and OpenIB
 * BSD.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials
 * provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
43 #ifdef CONFIG_PPC_64K_PAGES
44 #include <linux/slab.h>
46 #include "ehca_classes.h"
47 #include "ehca_iverbs.h"
48 #include "ehca_mrmw.h"
49 #include "ehca_tools.h"
52 MODULE_LICENSE("Dual BSD/GPL");
53 MODULE_AUTHOR("Christoph Raisch <raisch@de.ibm.com>");
54 MODULE_DESCRIPTION("IBM eServer HCA InfiniBand Device Driver");
55 MODULE_VERSION("SVNEHCA_0023");
/*
 * Module parameters; see the MODULE_PARM_DESC() block below for the
 * user-visible meaning of each value.
 */
int ehca_open_aqp1     = 0;
int ehca_debug_level   = 0;
int ehca_hw_level      = 0;
int ehca_nr_ports      = 2;
int ehca_use_hp_mr     = 0;
int ehca_port_act_time = 30;
int ehca_poll_all_eqs  = 1;
int ehca_static_rate   = -1;
int ehca_scaling_code  = 0;
67 module_param_named(open_aqp1
, ehca_open_aqp1
, int, 0);
68 module_param_named(debug_level
, ehca_debug_level
, int, 0);
69 module_param_named(hw_level
, ehca_hw_level
, int, 0);
70 module_param_named(nr_ports
, ehca_nr_ports
, int, 0);
71 module_param_named(use_hp_mr
, ehca_use_hp_mr
, int, 0);
72 module_param_named(port_act_time
, ehca_port_act_time
, int, 0);
73 module_param_named(poll_all_eqs
, ehca_poll_all_eqs
, int, 0);
74 module_param_named(static_rate
, ehca_static_rate
, int, 0);
75 module_param_named(scaling_code
, ehca_scaling_code
, int, 0);
77 MODULE_PARM_DESC(open_aqp1
,
78 "AQP1 on startup (0: no (default), 1: yes)");
79 MODULE_PARM_DESC(debug_level
,
81 " (0: no debug traces (default), 1: with debug traces)");
82 MODULE_PARM_DESC(hw_level
,
84 " (0: autosensing (default), 1: v. 0.20, 2: v. 0.21)");
85 MODULE_PARM_DESC(nr_ports
,
86 "number of connected ports (default: 2)");
87 MODULE_PARM_DESC(use_hp_mr
,
88 "high performance MRs (0: no (default), 1: yes)");
89 MODULE_PARM_DESC(port_act_time
,
90 "time to wait for port activation (default: 30 sec)");
91 MODULE_PARM_DESC(poll_all_eqs
,
92 "polls all event queues periodically"
93 " (0: no, 1: yes (default))");
94 MODULE_PARM_DESC(static_rate
,
95 "set permanent static rate (default: disabled)");
96 MODULE_PARM_DESC(scaling_code
,
97 "set scaling code (0: disabled/default, 1: enabled)");
99 DEFINE_RWLOCK(ehca_qp_idr_lock
);
100 DEFINE_RWLOCK(ehca_cq_idr_lock
);
101 DEFINE_IDR(ehca_qp_idr
);
102 DEFINE_IDR(ehca_cq_idr
);
104 static LIST_HEAD(shca_list
); /* list of all registered ehcas */
105 static DEFINE_SPINLOCK(shca_list_lock
);
107 static struct timer_list poll_eqs_timer
;
#ifdef CONFIG_PPC_64K_PAGES
/*
 * With 64K kernel pages a firmware control block must still be 4K, so it
 * comes from a dedicated slab cache instead of get_zeroed_page().
 */
static struct kmem_cache *ctblk_cache;

/* Allocate a zeroed firmware control block; returns NULL on failure. */
void *ehca_alloc_fw_ctrlblock(gfp_t flags)
{
	void *ret = kmem_cache_zalloc(ctblk_cache, flags);
	if (!ret)
		ehca_gen_err("Out of memory for ctblk");
	return ret;
}

/* Free a control block obtained from ehca_alloc_fw_ctrlblock(). */
void ehca_free_fw_ctrlblock(void *ptr)
{
	if (ptr)
		kmem_cache_free(ctblk_cache, ptr);
}
#endif
/*
 * Create all slab caches used by the driver (PD, CQ, QP, AV, MR/MW and,
 * with 64K pages, the firmware control block cache).  On failure all
 * caches created so far are torn down again via the goto chain.
 * Returns 0 on success, negative error code otherwise.
 */
static int ehca_create_slab_caches(void)
{
	int ret;

	ret = ehca_init_pd_cache();
	if (ret) {
		ehca_gen_err("Cannot create PD SLAB cache.");
		return ret;
	}

	ret = ehca_init_cq_cache();
	if (ret) {
		ehca_gen_err("Cannot create CQ SLAB cache.");
		goto create_slab_caches2;
	}

	ret = ehca_init_qp_cache();
	if (ret) {
		ehca_gen_err("Cannot create QP SLAB cache.");
		goto create_slab_caches3;
	}

	ret = ehca_init_av_cache();
	if (ret) {
		ehca_gen_err("Cannot create AV SLAB cache.");
		goto create_slab_caches4;
	}

	ret = ehca_init_mrmw_cache();
	if (ret) {
		ehca_gen_err("Cannot create MR&MW SLAB cache.");
		goto create_slab_caches5;
	}

#ifdef CONFIG_PPC_64K_PAGES
	ctblk_cache = kmem_cache_create("ehca_cache_ctblk",
					EHCA_PAGESIZE, H_CB_ALIGNMENT,
					SLAB_HWCACHE_ALIGN,
					NULL);
	if (!ctblk_cache) {
		ehca_gen_err("Cannot create ctblk SLAB cache.");
		ehca_cleanup_mrmw_cache();
		goto create_slab_caches5;
	}
#endif
	return 0;

create_slab_caches5:
	ehca_cleanup_av_cache();

create_slab_caches4:
	ehca_cleanup_qp_cache();

create_slab_caches3:
	ehca_cleanup_cq_cache();

create_slab_caches2:
	ehca_cleanup_pd_cache();

	return ret;
}
/* Destroy every slab cache created by ehca_create_slab_caches(). */
static void ehca_destroy_slab_caches(void)
{
	ehca_cleanup_mrmw_cache();
	ehca_cleanup_av_cache();
	ehca_cleanup_qp_cache();
	ehca_cleanup_cq_cache();
	ehca_cleanup_pd_cache();
#ifdef CONFIG_PPC_64K_PAGES
	if (ctblk_cache) {
		kmem_cache_destroy(ctblk_cache);
		ctblk_cache = NULL;
	}
#endif
}
203 #define EHCA_HCAAVER EHCA_BMASK_IBM(32, 39)
204 #define EHCA_REVID EHCA_BMASK_IBM(40, 63)
206 static struct cap_descr
{
209 } hca_cap_descr
[] = {
210 { HCA_CAP_AH_PORT_NR_CHECK
, "HCA_CAP_AH_PORT_NR_CHECK" },
211 { HCA_CAP_ATOMIC
, "HCA_CAP_ATOMIC" },
212 { HCA_CAP_AUTO_PATH_MIG
, "HCA_CAP_AUTO_PATH_MIG" },
213 { HCA_CAP_BAD_P_KEY_CTR
, "HCA_CAP_BAD_P_KEY_CTR" },
214 { HCA_CAP_SQD_RTS_PORT_CHANGE
, "HCA_CAP_SQD_RTS_PORT_CHANGE" },
215 { HCA_CAP_CUR_QP_STATE_MOD
, "HCA_CAP_CUR_QP_STATE_MOD" },
216 { HCA_CAP_INIT_TYPE
, "HCA_CAP_INIT_TYPE" },
217 { HCA_CAP_PORT_ACTIVE_EVENT
, "HCA_CAP_PORT_ACTIVE_EVENT" },
218 { HCA_CAP_Q_KEY_VIOL_CTR
, "HCA_CAP_Q_KEY_VIOL_CTR" },
219 { HCA_CAP_WQE_RESIZE
, "HCA_CAP_WQE_RESIZE" },
220 { HCA_CAP_RAW_PACKET_MCAST
, "HCA_CAP_RAW_PACKET_MCAST" },
221 { HCA_CAP_SHUTDOWN_PORT
, "HCA_CAP_SHUTDOWN_PORT" },
222 { HCA_CAP_RC_LL_QP
, "HCA_CAP_RC_LL_QP" },
223 { HCA_CAP_SRQ
, "HCA_CAP_SRQ" },
224 { HCA_CAP_UD_LL_QP
, "HCA_CAP_UD_LL_QP" },
225 { HCA_CAP_RESIZE_MR
, "HCA_CAP_RESIZE_MR" },
226 { HCA_CAP_MINI_QP
, "HCA_CAP_MINI_QP" },
229 int ehca_sense_attributes(struct ehca_shca
*shca
)
233 struct hipz_query_hca
*rblock
;
234 struct hipz_query_port
*port
;
236 rblock
= ehca_alloc_fw_ctrlblock(GFP_KERNEL
);
238 ehca_gen_err("Cannot allocate rblock memory.");
242 h_ret
= hipz_h_query_hca(shca
->ipz_hca_handle
, rblock
);
243 if (h_ret
!= H_SUCCESS
) {
244 ehca_gen_err("Cannot query device properties. h_ret=%lx",
247 goto sense_attributes1
;
250 if (ehca_nr_ports
== 1)
253 shca
->num_ports
= (u8
)rblock
->num_ports
;
255 ehca_gen_dbg(" ... found %x ports", rblock
->num_ports
);
257 if (ehca_hw_level
== 0) {
261 hcaaver
= EHCA_BMASK_GET(EHCA_HCAAVER
, rblock
->hw_ver
);
262 revid
= EHCA_BMASK_GET(EHCA_REVID
, rblock
->hw_ver
);
264 ehca_gen_dbg(" ... hardware version=%x:%x", hcaaver
, revid
);
268 shca
->hw_level
= 0x10 | (revid
+ 1);
270 shca
->hw_level
= 0x14;
271 } else if (hcaaver
== 2) {
273 shca
->hw_level
= 0x21;
274 else if (revid
== 0x10)
275 shca
->hw_level
= 0x22;
276 else if (revid
== 0x20 || revid
== 0x21)
277 shca
->hw_level
= 0x23;
280 if (!shca
->hw_level
) {
281 ehca_gen_warn("unknown hardware version"
282 " - assuming default level");
283 shca
->hw_level
= 0x22;
286 shca
->hw_level
= ehca_hw_level
;
287 ehca_gen_dbg(" ... hardware level=%x", shca
->hw_level
);
289 shca
->sport
[0].rate
= IB_RATE_30_GBPS
;
290 shca
->sport
[1].rate
= IB_RATE_30_GBPS
;
292 shca
->hca_cap
= rblock
->hca_cap_indicators
;
293 ehca_gen_dbg(" ... HCA capabilities:");
294 for (i
= 0; i
< ARRAY_SIZE(hca_cap_descr
); i
++)
295 if (EHCA_BMASK_GET(hca_cap_descr
[i
].mask
, shca
->hca_cap
))
296 ehca_gen_dbg(" %s", hca_cap_descr
[i
].descr
);
298 port
= (struct hipz_query_port
*)rblock
;
299 h_ret
= hipz_h_query_port(shca
->ipz_hca_handle
, 1, port
);
300 if (h_ret
!= H_SUCCESS
) {
301 ehca_gen_err("Cannot query port properties. h_ret=%lx",
304 goto sense_attributes1
;
307 shca
->max_mtu
= port
->max_mtu
;
310 ehca_free_fw_ctrlblock(rblock
);
314 static int init_node_guid(struct ehca_shca
*shca
)
317 struct hipz_query_hca
*rblock
;
319 rblock
= ehca_alloc_fw_ctrlblock(GFP_KERNEL
);
321 ehca_err(&shca
->ib_device
, "Can't allocate rblock memory.");
325 if (hipz_h_query_hca(shca
->ipz_hca_handle
, rblock
) != H_SUCCESS
) {
326 ehca_err(&shca
->ib_device
, "Can't query device properties");
328 goto init_node_guid1
;
331 memcpy(&shca
->ib_device
.node_guid
, &rblock
->node_guid
, sizeof(u64
));
334 ehca_free_fw_ctrlblock(rblock
);
338 int ehca_init_device(struct ehca_shca
*shca
)
342 ret
= init_node_guid(shca
);
346 strlcpy(shca
->ib_device
.name
, "ehca%d", IB_DEVICE_NAME_MAX
);
347 shca
->ib_device
.owner
= THIS_MODULE
;
349 shca
->ib_device
.uverbs_abi_ver
= 7;
350 shca
->ib_device
.uverbs_cmd_mask
=
351 (1ull << IB_USER_VERBS_CMD_GET_CONTEXT
) |
352 (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE
) |
353 (1ull << IB_USER_VERBS_CMD_QUERY_PORT
) |
354 (1ull << IB_USER_VERBS_CMD_ALLOC_PD
) |
355 (1ull << IB_USER_VERBS_CMD_DEALLOC_PD
) |
356 (1ull << IB_USER_VERBS_CMD_REG_MR
) |
357 (1ull << IB_USER_VERBS_CMD_DEREG_MR
) |
358 (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL
) |
359 (1ull << IB_USER_VERBS_CMD_CREATE_CQ
) |
360 (1ull << IB_USER_VERBS_CMD_DESTROY_CQ
) |
361 (1ull << IB_USER_VERBS_CMD_CREATE_QP
) |
362 (1ull << IB_USER_VERBS_CMD_MODIFY_QP
) |
363 (1ull << IB_USER_VERBS_CMD_QUERY_QP
) |
364 (1ull << IB_USER_VERBS_CMD_DESTROY_QP
) |
365 (1ull << IB_USER_VERBS_CMD_ATTACH_MCAST
) |
366 (1ull << IB_USER_VERBS_CMD_DETACH_MCAST
);
368 shca
->ib_device
.node_type
= RDMA_NODE_IB_CA
;
369 shca
->ib_device
.phys_port_cnt
= shca
->num_ports
;
370 shca
->ib_device
.num_comp_vectors
= 1;
371 shca
->ib_device
.dma_device
= &shca
->ibmebus_dev
->ofdev
.dev
;
372 shca
->ib_device
.query_device
= ehca_query_device
;
373 shca
->ib_device
.query_port
= ehca_query_port
;
374 shca
->ib_device
.query_gid
= ehca_query_gid
;
375 shca
->ib_device
.query_pkey
= ehca_query_pkey
;
376 /* shca->in_device.modify_device = ehca_modify_device */
377 shca
->ib_device
.modify_port
= ehca_modify_port
;
378 shca
->ib_device
.alloc_ucontext
= ehca_alloc_ucontext
;
379 shca
->ib_device
.dealloc_ucontext
= ehca_dealloc_ucontext
;
380 shca
->ib_device
.alloc_pd
= ehca_alloc_pd
;
381 shca
->ib_device
.dealloc_pd
= ehca_dealloc_pd
;
382 shca
->ib_device
.create_ah
= ehca_create_ah
;
383 /* shca->ib_device.modify_ah = ehca_modify_ah; */
384 shca
->ib_device
.query_ah
= ehca_query_ah
;
385 shca
->ib_device
.destroy_ah
= ehca_destroy_ah
;
386 shca
->ib_device
.create_qp
= ehca_create_qp
;
387 shca
->ib_device
.modify_qp
= ehca_modify_qp
;
388 shca
->ib_device
.query_qp
= ehca_query_qp
;
389 shca
->ib_device
.destroy_qp
= ehca_destroy_qp
;
390 shca
->ib_device
.post_send
= ehca_post_send
;
391 shca
->ib_device
.post_recv
= ehca_post_recv
;
392 shca
->ib_device
.create_cq
= ehca_create_cq
;
393 shca
->ib_device
.destroy_cq
= ehca_destroy_cq
;
394 shca
->ib_device
.resize_cq
= ehca_resize_cq
;
395 shca
->ib_device
.poll_cq
= ehca_poll_cq
;
396 /* shca->ib_device.peek_cq = ehca_peek_cq; */
397 shca
->ib_device
.req_notify_cq
= ehca_req_notify_cq
;
398 /* shca->ib_device.req_ncomp_notif = ehca_req_ncomp_notif; */
399 shca
->ib_device
.get_dma_mr
= ehca_get_dma_mr
;
400 shca
->ib_device
.reg_phys_mr
= ehca_reg_phys_mr
;
401 shca
->ib_device
.reg_user_mr
= ehca_reg_user_mr
;
402 shca
->ib_device
.query_mr
= ehca_query_mr
;
403 shca
->ib_device
.dereg_mr
= ehca_dereg_mr
;
404 shca
->ib_device
.rereg_phys_mr
= ehca_rereg_phys_mr
;
405 shca
->ib_device
.alloc_mw
= ehca_alloc_mw
;
406 shca
->ib_device
.bind_mw
= ehca_bind_mw
;
407 shca
->ib_device
.dealloc_mw
= ehca_dealloc_mw
;
408 shca
->ib_device
.alloc_fmr
= ehca_alloc_fmr
;
409 shca
->ib_device
.map_phys_fmr
= ehca_map_phys_fmr
;
410 shca
->ib_device
.unmap_fmr
= ehca_unmap_fmr
;
411 shca
->ib_device
.dealloc_fmr
= ehca_dealloc_fmr
;
412 shca
->ib_device
.attach_mcast
= ehca_attach_mcast
;
413 shca
->ib_device
.detach_mcast
= ehca_detach_mcast
;
414 /* shca->ib_device.process_mad = ehca_process_mad; */
415 shca
->ib_device
.mmap
= ehca_mmap
;
417 if (EHCA_BMASK_GET(HCA_CAP_SRQ
, shca
->hca_cap
)) {
418 shca
->ib_device
.uverbs_cmd_mask
|=
419 (1ull << IB_USER_VERBS_CMD_CREATE_SRQ
) |
420 (1ull << IB_USER_VERBS_CMD_MODIFY_SRQ
) |
421 (1ull << IB_USER_VERBS_CMD_QUERY_SRQ
) |
422 (1ull << IB_USER_VERBS_CMD_DESTROY_SRQ
);
424 shca
->ib_device
.create_srq
= ehca_create_srq
;
425 shca
->ib_device
.modify_srq
= ehca_modify_srq
;
426 shca
->ib_device
.query_srq
= ehca_query_srq
;
427 shca
->ib_device
.destroy_srq
= ehca_destroy_srq
;
428 shca
->ib_device
.post_srq_recv
= ehca_post_srq_recv
;
434 static int ehca_create_aqp1(struct ehca_shca
*shca
, u32 port
)
436 struct ehca_sport
*sport
= &shca
->sport
[port
- 1];
439 struct ib_qp_init_attr qp_init_attr
;
442 if (sport
->ibcq_aqp1
) {
443 ehca_err(&shca
->ib_device
, "AQP1 CQ is already created.");
447 ibcq
= ib_create_cq(&shca
->ib_device
, NULL
, NULL
, (void *)(-1), 10, 0);
449 ehca_err(&shca
->ib_device
, "Cannot create AQP1 CQ.");
450 return PTR_ERR(ibcq
);
452 sport
->ibcq_aqp1
= ibcq
;
454 if (sport
->ibqp_aqp1
) {
455 ehca_err(&shca
->ib_device
, "AQP1 QP is already created.");
460 memset(&qp_init_attr
, 0, sizeof(struct ib_qp_init_attr
));
461 qp_init_attr
.send_cq
= ibcq
;
462 qp_init_attr
.recv_cq
= ibcq
;
463 qp_init_attr
.sq_sig_type
= IB_SIGNAL_ALL_WR
;
464 qp_init_attr
.cap
.max_send_wr
= 100;
465 qp_init_attr
.cap
.max_recv_wr
= 100;
466 qp_init_attr
.cap
.max_send_sge
= 2;
467 qp_init_attr
.cap
.max_recv_sge
= 1;
468 qp_init_attr
.qp_type
= IB_QPT_GSI
;
469 qp_init_attr
.port_num
= port
;
470 qp_init_attr
.qp_context
= NULL
;
471 qp_init_attr
.event_handler
= NULL
;
472 qp_init_attr
.srq
= NULL
;
474 ibqp
= ib_create_qp(&shca
->pd
->ib_pd
, &qp_init_attr
);
476 ehca_err(&shca
->ib_device
, "Cannot create AQP1 QP.");
480 sport
->ibqp_aqp1
= ibqp
;
485 ib_destroy_cq(sport
->ibcq_aqp1
);
489 static int ehca_destroy_aqp1(struct ehca_sport
*sport
)
493 ret
= ib_destroy_qp(sport
->ibqp_aqp1
);
495 ehca_gen_err("Cannot destroy AQP1 QP. ret=%x", ret
);
499 ret
= ib_destroy_cq(sport
->ibcq_aqp1
);
501 ehca_gen_err("Cannot destroy AQP1 CQ. ret=%x", ret
);
506 static ssize_t
ehca_show_debug_level(struct device_driver
*ddp
, char *buf
)
508 return snprintf(buf
, PAGE_SIZE
, "%d\n",
512 static ssize_t
ehca_store_debug_level(struct device_driver
*ddp
,
513 const char *buf
, size_t count
)
515 int value
= (*buf
) - '0';
516 if (value
>= 0 && value
<= 9)
517 ehca_debug_level
= value
;
521 DRIVER_ATTR(debug_level
, S_IRUSR
| S_IWUSR
,
522 ehca_show_debug_level
, ehca_store_debug_level
);
524 static struct attribute
*ehca_drv_attrs
[] = {
525 &driver_attr_debug_level
.attr
,
529 static struct attribute_group ehca_drv_attr_grp
= {
530 .attrs
= ehca_drv_attrs
/*
 * Generate a read-only sysfs device attribute that queries the firmware
 * rblock and prints the named field.  num_ports is special-cased: when the
 * nr_ports module parameter forces a single port, "1" is reported
 * regardless of what the hardware says.
 */
#define EHCA_RESOURCE_ATTR(name)                                           \
static ssize_t ehca_show_##name(struct device *dev,                        \
				struct device_attribute *attr,             \
				char *buf)                                 \
{                                                                          \
	struct ehca_shca *shca;                                            \
	struct hipz_query_hca *rblock;                                     \
	int data;                                                          \
	                                                                   \
	shca = dev->driver_data;                                           \
	                                                                   \
	rblock = ehca_alloc_fw_ctrlblock(GFP_KERNEL);                      \
	if (!rblock) {                                                     \
		dev_err(dev, "Can't allocate rblock memory.");             \
		return 0;                                                  \
	}                                                                  \
	                                                                   \
	if (hipz_h_query_hca(shca->ipz_hca_handle, rblock) != H_SUCCESS) { \
		dev_err(dev, "Can't query device properties");             \
		ehca_free_fw_ctrlblock(rblock);                            \
		return 0;                                                  \
	}                                                                  \
	                                                                   \
	data = rblock->name;                                               \
	ehca_free_fw_ctrlblock(rblock);                                    \
	                                                                   \
	if ((strcmp(#name, "num_ports") == 0) && (ehca_nr_ports == 1))     \
		return snprintf(buf, 256, "1\n");                          \
	else                                                               \
		return snprintf(buf, 256, "%d\n", data);                   \
}                                                                          \
static DEVICE_ATTR(name, S_IRUGO, ehca_show_##name, NULL);
/* One read-only sysfs attribute per firmware resource counter. */
EHCA_RESOURCE_ATTR(num_ports);
EHCA_RESOURCE_ATTR(hw_ver);
EHCA_RESOURCE_ATTR(max_eq);
EHCA_RESOURCE_ATTR(cur_eq);
EHCA_RESOURCE_ATTR(max_cq);
EHCA_RESOURCE_ATTR(cur_cq);
EHCA_RESOURCE_ATTR(max_qp);
EHCA_RESOURCE_ATTR(cur_qp);
EHCA_RESOURCE_ATTR(max_mr);
EHCA_RESOURCE_ATTR(cur_mr);
EHCA_RESOURCE_ATTR(max_mw);
EHCA_RESOURCE_ATTR(cur_mw);
EHCA_RESOURCE_ATTR(max_pd);
EHCA_RESOURCE_ATTR(max_ah);
582 static ssize_t
ehca_show_adapter_handle(struct device
*dev
,
583 struct device_attribute
*attr
,
586 struct ehca_shca
*shca
= dev
->driver_data
;
588 return sprintf(buf
, "%lx\n", shca
->ipz_hca_handle
.handle
);
591 static DEVICE_ATTR(adapter_handle
, S_IRUGO
, ehca_show_adapter_handle
, NULL
);
593 static struct attribute
*ehca_dev_attrs
[] = {
594 &dev_attr_adapter_handle
.attr
,
595 &dev_attr_num_ports
.attr
,
596 &dev_attr_hw_ver
.attr
,
597 &dev_attr_max_eq
.attr
,
598 &dev_attr_cur_eq
.attr
,
599 &dev_attr_max_cq
.attr
,
600 &dev_attr_cur_cq
.attr
,
601 &dev_attr_max_qp
.attr
,
602 &dev_attr_cur_qp
.attr
,
603 &dev_attr_max_mr
.attr
,
604 &dev_attr_cur_mr
.attr
,
605 &dev_attr_max_mw
.attr
,
606 &dev_attr_cur_mw
.attr
,
607 &dev_attr_max_pd
.attr
,
608 &dev_attr_max_ah
.attr
,
612 static struct attribute_group ehca_dev_attr_grp
= {
613 .attrs
= ehca_dev_attrs
616 static int __devinit
ehca_probe(struct ibmebus_dev
*dev
,
617 const struct of_device_id
*id
)
619 struct ehca_shca
*shca
;
624 handle
= of_get_property(dev
->ofdev
.node
, "ibm,hca-handle", NULL
);
626 ehca_gen_err("Cannot get eHCA handle for adapter: %s.",
627 dev
->ofdev
.node
->full_name
);
632 ehca_gen_err("Wrong eHCA handle for adapter: %s.",
633 dev
->ofdev
.node
->full_name
);
637 shca
= (struct ehca_shca
*)ib_alloc_device(sizeof(*shca
));
639 ehca_gen_err("Cannot allocate shca memory.");
642 mutex_init(&shca
->modify_mutex
);
644 shca
->ibmebus_dev
= dev
;
645 shca
->ipz_hca_handle
.handle
= *handle
;
646 dev
->ofdev
.dev
.driver_data
= shca
;
648 ret
= ehca_sense_attributes(shca
);
650 ehca_gen_err("Cannot sense eHCA attributes.");
654 ret
= ehca_init_device(shca
);
656 ehca_gen_err("Cannot init ehca device struct");
660 /* create event queues */
661 ret
= ehca_create_eq(shca
, &shca
->eq
, EHCA_EQ
, 2048);
663 ehca_err(&shca
->ib_device
, "Cannot create EQ.");
667 ret
= ehca_create_eq(shca
, &shca
->neq
, EHCA_NEQ
, 513);
669 ehca_err(&shca
->ib_device
, "Cannot create NEQ.");
673 /* create internal protection domain */
674 ibpd
= ehca_alloc_pd(&shca
->ib_device
, (void *)(-1), NULL
);
676 ehca_err(&shca
->ib_device
, "Cannot create internal PD.");
681 shca
->pd
= container_of(ibpd
, struct ehca_pd
, ib_pd
);
682 shca
->pd
->ib_pd
.device
= &shca
->ib_device
;
684 /* create internal max MR */
685 ret
= ehca_reg_internal_maxmr(shca
, shca
->pd
, &shca
->maxmr
);
688 ehca_err(&shca
->ib_device
, "Cannot create internal MR ret=%x",
693 ret
= ib_register_device(&shca
->ib_device
);
695 ehca_err(&shca
->ib_device
,
696 "ib_register_device() failed ret=%x", ret
);
700 /* create AQP1 for port 1 */
701 if (ehca_open_aqp1
== 1) {
702 shca
->sport
[0].port_state
= IB_PORT_DOWN
;
703 ret
= ehca_create_aqp1(shca
, 1);
705 ehca_err(&shca
->ib_device
,
706 "Cannot create AQP1 for port 1.");
711 /* create AQP1 for port 2 */
712 if ((ehca_open_aqp1
== 1) && (shca
->num_ports
== 2)) {
713 shca
->sport
[1].port_state
= IB_PORT_DOWN
;
714 ret
= ehca_create_aqp1(shca
, 2);
716 ehca_err(&shca
->ib_device
,
717 "Cannot create AQP1 for port 2.");
722 ret
= sysfs_create_group(&dev
->ofdev
.dev
.kobj
, &ehca_dev_attr_grp
);
723 if (ret
) /* only complain; we can live without attributes */
724 ehca_err(&shca
->ib_device
,
725 "Cannot create device attributes ret=%d", ret
);
727 spin_lock(&shca_list_lock
);
728 list_add(&shca
->shca_list
, &shca_list
);
729 spin_unlock(&shca_list_lock
);
734 ret
= ehca_destroy_aqp1(&shca
->sport
[0]);
736 ehca_err(&shca
->ib_device
,
737 "Cannot destroy AQP1 for port 1. ret=%x", ret
);
740 ib_unregister_device(&shca
->ib_device
);
743 ret
= ehca_dereg_internal_maxmr(shca
);
745 ehca_err(&shca
->ib_device
,
746 "Cannot destroy internal MR. ret=%x", ret
);
749 ret
= ehca_dealloc_pd(&shca
->pd
->ib_pd
);
751 ehca_err(&shca
->ib_device
,
752 "Cannot destroy internal PD. ret=%x", ret
);
755 ret
= ehca_destroy_eq(shca
, &shca
->neq
);
757 ehca_err(&shca
->ib_device
,
758 "Cannot destroy NEQ. ret=%x", ret
);
761 ret
= ehca_destroy_eq(shca
, &shca
->eq
);
763 ehca_err(&shca
->ib_device
,
764 "Cannot destroy EQ. ret=%x", ret
);
767 ib_dealloc_device(&shca
->ib_device
);
772 static int __devexit
ehca_remove(struct ibmebus_dev
*dev
)
774 struct ehca_shca
*shca
= dev
->ofdev
.dev
.driver_data
;
777 sysfs_remove_group(&dev
->ofdev
.dev
.kobj
, &ehca_dev_attr_grp
);
779 if (ehca_open_aqp1
== 1) {
781 for (i
= 0; i
< shca
->num_ports
; i
++) {
782 ret
= ehca_destroy_aqp1(&shca
->sport
[i
]);
784 ehca_err(&shca
->ib_device
,
785 "Cannot destroy AQP1 for port %x "
790 ib_unregister_device(&shca
->ib_device
);
792 ret
= ehca_dereg_internal_maxmr(shca
);
794 ehca_err(&shca
->ib_device
,
795 "Cannot destroy internal MR. ret=%x", ret
);
797 ret
= ehca_dealloc_pd(&shca
->pd
->ib_pd
);
799 ehca_err(&shca
->ib_device
,
800 "Cannot destroy internal PD. ret=%x", ret
);
802 ret
= ehca_destroy_eq(shca
, &shca
->eq
);
804 ehca_err(&shca
->ib_device
, "Cannot destroy EQ. ret=%x", ret
);
806 ret
= ehca_destroy_eq(shca
, &shca
->neq
);
808 ehca_err(&shca
->ib_device
, "Canot destroy NEQ. ret=%x", ret
);
810 ib_dealloc_device(&shca
->ib_device
);
812 spin_lock(&shca_list_lock
);
813 list_del(&shca
->shca_list
);
814 spin_unlock(&shca_list_lock
);
819 static struct of_device_id ehca_device_table
[] =
823 .compatible
= "IBM,lhca",
828 static struct ibmebus_driver ehca_driver
= {
830 .id_table
= ehca_device_table
,
832 .remove
= ehca_remove
,
835 void ehca_poll_eqs(unsigned long data
)
837 struct ehca_shca
*shca
;
839 spin_lock(&shca_list_lock
);
840 list_for_each_entry(shca
, &shca_list
, shca_list
) {
841 if (shca
->eq
.is_initialized
) {
842 /* call deadman proc only if eq ptr does not change */
843 struct ehca_eq
*eq
= &shca
->eq
;
845 volatile u64 q_ofs
, q_ofs2
;
847 spin_lock_irqsave(&eq
->spinlock
, flags
);
848 q_ofs
= eq
->ipz_queue
.current_q_offset
;
849 spin_unlock_irqrestore(&eq
->spinlock
, flags
);
851 spin_lock_irqsave(&eq
->spinlock
, flags
);
852 q_ofs2
= eq
->ipz_queue
.current_q_offset
;
853 spin_unlock_irqrestore(&eq
->spinlock
, flags
);
855 } while (q_ofs
== q_ofs2
&& max
> 0);
857 ehca_process_eq(shca
, 0);
860 mod_timer(&poll_eqs_timer
, jiffies
+ HZ
);
861 spin_unlock(&shca_list_lock
);
864 int __init
ehca_module_init(void)
868 printk(KERN_INFO
"eHCA Infiniband Device Driver "
869 "(Rel.: SVNEHCA_0023)\n");
871 ret
= ehca_create_comp_pool();
873 ehca_gen_err("Cannot create comp pool.");
877 ret
= ehca_create_slab_caches();
879 ehca_gen_err("Cannot create SLAB caches");
884 ret
= ibmebus_register_driver(&ehca_driver
);
886 ehca_gen_err("Cannot register eHCA device driver");
891 ret
= sysfs_create_group(&ehca_driver
.driver
.kobj
, &ehca_drv_attr_grp
);
892 if (ret
) /* only complain; we can live without attributes */
893 ehca_gen_err("Cannot create driver attributes ret=%d", ret
);
895 if (ehca_poll_all_eqs
!= 1) {
896 ehca_gen_err("WARNING!!!");
897 ehca_gen_err("It is possible to lose interrupts.");
899 init_timer(&poll_eqs_timer
);
900 poll_eqs_timer
.function
= ehca_poll_eqs
;
901 poll_eqs_timer
.expires
= jiffies
+ HZ
;
902 add_timer(&poll_eqs_timer
);
908 ehca_destroy_slab_caches();
911 ehca_destroy_comp_pool();
915 void __exit
ehca_module_exit(void)
917 if (ehca_poll_all_eqs
== 1)
918 del_timer_sync(&poll_eqs_timer
);
920 sysfs_remove_group(&ehca_driver
.driver
.kobj
, &ehca_drv_attr_grp
);
921 ibmebus_unregister_driver(&ehca_driver
);
923 ehca_destroy_slab_caches();
925 ehca_destroy_comp_pool();
927 idr_destroy(&ehca_cq_idr
);
928 idr_destroy(&ehca_qp_idr
);
module_init(ehca_module_init);
module_exit(ehca_module_exit);