/*
 *  IBM eServer eHCA Infiniband device driver for Linux on POWER
 *
 *  module start stop, hca detection
 *
 *  Authors: Heiko J Schick <schickhj@de.ibm.com>
 *           Hoang-Nam Nguyen <hnguyen@de.ibm.com>
 *           Joachim Fenkes <fenkes@de.ibm.com>
 *
 *  Copyright (c) 2005 IBM Corporation
 *
 *  All rights reserved.
 *
 *  This source code is distributed under a dual license of GPL v2.0 and OpenIB
 *  BSD.
 *
 * OpenIB BSD License
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials
 * provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#ifdef CONFIG_PPC_64K_PAGES
#include <linux/slab.h>
#endif
#include "ehca_classes.h"
#include "ehca_iverbs.h"
#include "ehca_mrmw.h"
#include "ehca_tools.h"
#include "hcp_if.h"
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Christoph Raisch <raisch@de.ibm.com>");
MODULE_DESCRIPTION("IBM eServer HCA InfiniBand Device Driver");
MODULE_VERSION("SVNEHCA_0023");
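
/*
 * Module parameters: tunable driver defaults, read once at load time via
 * the module_param_named() declarations below (permission 0, so they are
 * not writable through sysfs after loading).
 */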
int ehca_open_aqp1     = 0;
int ehca_debug_level   = 0;
int ehca_hw_level      = 0;
int ehca_nr_ports      = 2;
int ehca_use_hp_mr     = 0;
int ehca_port_act_time = 30;
int ehca_poll_all_eqs  = 1;
int ehca_static_rate   = -1;
int ehca_scaling_code  = 0;
module_param_named(open_aqp1,     ehca_open_aqp1,     int, 0);
module_param_named(debug_level,   ehca_debug_level,   int, 0);
module_param_named(hw_level,      ehca_hw_level,      int, 0);
module_param_named(nr_ports,      ehca_nr_ports,      int, 0);
module_param_named(use_hp_mr,     ehca_use_hp_mr,     int, 0);
module_param_named(port_act_time, ehca_port_act_time, int, 0);
module_param_named(poll_all_eqs,  ehca_poll_all_eqs,  int, 0);
module_param_named(static_rate,   ehca_static_rate,   int, 0);
module_param_named(scaling_code,  ehca_scaling_code,  int, 0);
MODULE_PARM_DESC(open_aqp1,
		 "AQP1 on startup (0: no (default), 1: yes)");
MODULE_PARM_DESC(debug_level,
		 "debug level"
		 " (0: no debug traces (default), 1: with debug traces)");
MODULE_PARM_DESC(hw_level,
		 "hardware level"
		 " (0: autosensing (default), 1: v. 0.20, 2: v. 0.21)");
MODULE_PARM_DESC(nr_ports,
		 "number of connected ports (default: 2)");
MODULE_PARM_DESC(use_hp_mr,
		 "high performance MRs (0: no (default), 1: yes)");
MODULE_PARM_DESC(port_act_time,
		 "time to wait for port activation (default: 30 sec)");
MODULE_PARM_DESC(poll_all_eqs,
		 "polls all event queues periodically"
		 " (0: no, 1: yes (default))");
MODULE_PARM_DESC(static_rate,
		 "set permanent static rate (default: disabled)");
MODULE_PARM_DESC(scaling_code,
		 "set scaling code (0: disabled/default, 1: enabled)");
spinlock_t ehca_qp_idr_lock;
spinlock_t ehca_cq_idr_lock;
spinlock_t hcall_lock;
DEFINE_IDR(ehca_qp_idr);
DEFINE_IDR(ehca_cq_idr);

static struct list_head shca_list; /* list of all registered ehcas */
static spinlock_t shca_list_lock;

static struct timer_list poll_eqs_timer;
#ifdef CONFIG_PPC_64K_PAGES
static struct kmem_cache *ctblk_cache = NULL;

void *ehca_alloc_fw_ctrlblock(gfp_t flags)
{
	void *ret = kmem_cache_zalloc(ctblk_cache, flags);
	if (!ret)
		ehca_gen_err("Out of memory for ctblk");
	return ret;
}

void ehca_free_fw_ctrlblock(void *ptr)
{
	if (ptr)
		kmem_cache_free(ctblk_cache, ptr);
}
#endif
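
/*
 * Create the kmem caches for all eHCA resource objects (PD, CQ, QP, AV,
 * MR/MW and, with 64K pages, firmware control blocks). Any caches already
 * created are torn down again if a later allocation fails.
 */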
static int ehca_create_slab_caches(void)
{
	int ret;

	ret = ehca_init_pd_cache();
	if (ret) {
		ehca_gen_err("Cannot create PD SLAB cache.");
		return ret;
	}

	ret = ehca_init_cq_cache();
	if (ret) {
		ehca_gen_err("Cannot create CQ SLAB cache.");
		goto create_slab_caches2;
	}

	ret = ehca_init_qp_cache();
	if (ret) {
		ehca_gen_err("Cannot create QP SLAB cache.");
		goto create_slab_caches3;
	}

	ret = ehca_init_av_cache();
	if (ret) {
		ehca_gen_err("Cannot create AV SLAB cache.");
		goto create_slab_caches4;
	}

	ret = ehca_init_mrmw_cache();
	if (ret) {
		ehca_gen_err("Cannot create MR&MW SLAB cache.");
		goto create_slab_caches5;
	}

#ifdef CONFIG_PPC_64K_PAGES
	ctblk_cache = kmem_cache_create("ehca_cache_ctblk",
					EHCA_PAGESIZE, H_CB_ALIGNMENT,
					SLAB_HWCACHE_ALIGN,
					NULL, NULL);
	if (!ctblk_cache) {
		ehca_gen_err("Cannot create ctblk SLAB cache.");
		ehca_cleanup_mrmw_cache();
		goto create_slab_caches5;
	}
#endif
	return 0;

create_slab_caches5:
	ehca_cleanup_av_cache();

create_slab_caches4:
	ehca_cleanup_qp_cache();

create_slab_caches3:
	ehca_cleanup_cq_cache();

create_slab_caches2:
	ehca_cleanup_pd_cache();

	return ret;
}
static void ehca_destroy_slab_caches(void)
{
	ehca_cleanup_mrmw_cache();
	ehca_cleanup_av_cache();
	ehca_cleanup_qp_cache();
	ehca_cleanup_cq_cache();
	ehca_cleanup_pd_cache();
#ifdef CONFIG_PPC_64K_PAGES
	if (ctblk_cache)
		kmem_cache_destroy(ctblk_cache);
#endif
}
#define EHCA_HCAAVER  EHCA_BMASK_IBM(32,39)
#define EHCA_REVID    EHCA_BMASK_IBM(40,63)
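
/*
 * Query the firmware for the adapter's properties and derive the number
 * of ports, the hardware level and the default port rate from the result.
 */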
int ehca_sense_attributes(struct ehca_shca *shca)
{
	int ret = 0;
	u64 h_ret;
	struct hipz_query_hca *rblock;

	rblock = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
	if (!rblock) {
		ehca_gen_err("Cannot allocate rblock memory.");
		return -ENOMEM;
	}

	h_ret = hipz_h_query_hca(shca->ipz_hca_handle, rblock);
	if (h_ret != H_SUCCESS) {
		ehca_gen_err("Cannot query device properties. h_ret=%lx",
			     h_ret);
		ret = -EPERM;
		goto sense_attributes1;
	}

	if (ehca_nr_ports == 1)
		shca->num_ports = 1;
	else
		shca->num_ports = (u8)rblock->num_ports;

	ehca_gen_dbg(" ... found %x ports", rblock->num_ports);

	if (ehca_hw_level == 0) {
		u32 hcaaver;
		u32 revid;

		hcaaver = EHCA_BMASK_GET(EHCA_HCAAVER, rblock->hw_ver);
		revid   = EHCA_BMASK_GET(EHCA_REVID, rblock->hw_ver);

		ehca_gen_dbg(" ... hardware version=%x:%x", hcaaver, revid);

		if ((hcaaver == 1) && (revid == 0))
			shca->hw_level = 0;
		else if ((hcaaver == 1) && (revid == 1))
			shca->hw_level = 1;
		else if ((hcaaver == 1) && (revid == 2))
			shca->hw_level = 2;
	}
	ehca_gen_dbg(" ... hardware level=%x", shca->hw_level);

	shca->sport[0].rate = IB_RATE_30_GBPS;
	shca->sport[1].rate = IB_RATE_30_GBPS;

sense_attributes1:
	ehca_free_fw_ctrlblock(rblock);
	return ret;
}
static int init_node_guid(struct ehca_shca *shca)
{
	int ret = 0;
	struct hipz_query_hca *rblock;

	rblock = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
	if (!rblock) {
		ehca_err(&shca->ib_device, "Can't allocate rblock memory.");
		return -ENOMEM;
	}

	if (hipz_h_query_hca(shca->ipz_hca_handle, rblock) != H_SUCCESS) {
		ehca_err(&shca->ib_device, "Can't query device properties");
		ret = -EINVAL;
		goto init_node_guid1;
	}

	memcpy(&shca->ib_device.node_guid, &rblock->node_guid, sizeof(u64));

init_node_guid1:
	ehca_free_fw_ctrlblock(rblock);
	return ret;
}
int ehca_init_device(struct ehca_shca *shca)
{
	int ret;

	ret = init_node_guid(shca);
	if (ret)
		return ret;

	strlcpy(shca->ib_device.name, "ehca%d", IB_DEVICE_NAME_MAX);
	shca->ib_device.owner            = THIS_MODULE;

	shca->ib_device.uverbs_abi_ver   = 6;
	shca->ib_device.uverbs_cmd_mask  =
		(1ull << IB_USER_VERBS_CMD_GET_CONTEXT)         |
		(1ull << IB_USER_VERBS_CMD_QUERY_DEVICE)        |
		(1ull << IB_USER_VERBS_CMD_QUERY_PORT)          |
		(1ull << IB_USER_VERBS_CMD_ALLOC_PD)            |
		(1ull << IB_USER_VERBS_CMD_DEALLOC_PD)          |
		(1ull << IB_USER_VERBS_CMD_REG_MR)              |
		(1ull << IB_USER_VERBS_CMD_DEREG_MR)            |
		(1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
		(1ull << IB_USER_VERBS_CMD_CREATE_CQ)           |
		(1ull << IB_USER_VERBS_CMD_DESTROY_CQ)          |
		(1ull << IB_USER_VERBS_CMD_CREATE_QP)           |
		(1ull << IB_USER_VERBS_CMD_MODIFY_QP)           |
		(1ull << IB_USER_VERBS_CMD_QUERY_QP)            |
		(1ull << IB_USER_VERBS_CMD_DESTROY_QP)          |
		(1ull << IB_USER_VERBS_CMD_ATTACH_MCAST)        |
		(1ull << IB_USER_VERBS_CMD_DETACH_MCAST);

	shca->ib_device.node_type        = RDMA_NODE_IB_CA;
	shca->ib_device.phys_port_cnt    = shca->num_ports;
	shca->ib_device.num_comp_vectors = 1;
	shca->ib_device.dma_device       = &shca->ibmebus_dev->ofdev.dev;
	shca->ib_device.query_device     = ehca_query_device;
	shca->ib_device.query_port       = ehca_query_port;
	shca->ib_device.query_gid        = ehca_query_gid;
	shca->ib_device.query_pkey       = ehca_query_pkey;
	/* shca->ib_device.modify_device = ehca_modify_device */
	shca->ib_device.modify_port      = ehca_modify_port;
	shca->ib_device.alloc_ucontext   = ehca_alloc_ucontext;
	shca->ib_device.dealloc_ucontext = ehca_dealloc_ucontext;
	shca->ib_device.alloc_pd         = ehca_alloc_pd;
	shca->ib_device.dealloc_pd       = ehca_dealloc_pd;
	shca->ib_device.create_ah        = ehca_create_ah;
	/* shca->ib_device.modify_ah     = ehca_modify_ah; */
	shca->ib_device.query_ah         = ehca_query_ah;
	shca->ib_device.destroy_ah       = ehca_destroy_ah;
	shca->ib_device.create_qp        = ehca_create_qp;
	shca->ib_device.modify_qp        = ehca_modify_qp;
	shca->ib_device.query_qp         = ehca_query_qp;
	shca->ib_device.destroy_qp       = ehca_destroy_qp;
	shca->ib_device.post_send        = ehca_post_send;
	shca->ib_device.post_recv        = ehca_post_recv;
	shca->ib_device.create_cq        = ehca_create_cq;
	shca->ib_device.destroy_cq       = ehca_destroy_cq;
	shca->ib_device.resize_cq        = ehca_resize_cq;
	shca->ib_device.poll_cq          = ehca_poll_cq;
	/* shca->ib_device.peek_cq       = ehca_peek_cq; */
	shca->ib_device.req_notify_cq    = ehca_req_notify_cq;
	/* shca->ib_device.req_ncomp_notif = ehca_req_ncomp_notif; */
	shca->ib_device.get_dma_mr       = ehca_get_dma_mr;
	shca->ib_device.reg_phys_mr      = ehca_reg_phys_mr;
	shca->ib_device.reg_user_mr      = ehca_reg_user_mr;
	shca->ib_device.query_mr         = ehca_query_mr;
	shca->ib_device.dereg_mr         = ehca_dereg_mr;
	shca->ib_device.rereg_phys_mr    = ehca_rereg_phys_mr;
	shca->ib_device.alloc_mw         = ehca_alloc_mw;
	shca->ib_device.bind_mw          = ehca_bind_mw;
	shca->ib_device.dealloc_mw       = ehca_dealloc_mw;
	shca->ib_device.alloc_fmr        = ehca_alloc_fmr;
	shca->ib_device.map_phys_fmr     = ehca_map_phys_fmr;
	shca->ib_device.unmap_fmr        = ehca_unmap_fmr;
	shca->ib_device.dealloc_fmr      = ehca_dealloc_fmr;
	shca->ib_device.attach_mcast     = ehca_attach_mcast;
	shca->ib_device.detach_mcast     = ehca_detach_mcast;
	/* shca->ib_device.process_mad   = ehca_process_mad; */
	shca->ib_device.mmap             = ehca_mmap;

	return ret;
}
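
/*
 * Create the GSI queue pair (AQP1) and its completion queue for the given
 * port; only used when the open_aqp1 module parameter is set.
 */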
static int ehca_create_aqp1(struct ehca_shca *shca, u32 port)
{
	struct ehca_sport *sport = &shca->sport[port - 1];
	struct ib_cq *ibcq;
	struct ib_qp *ibqp;
	struct ib_qp_init_attr qp_init_attr;
	int ret;

	if (sport->ibcq_aqp1) {
		ehca_err(&shca->ib_device, "AQP1 CQ is already created.");
		return -EPERM;
	}

	ibcq = ib_create_cq(&shca->ib_device, NULL, NULL, (void *)(-1), 10, 0);
	if (IS_ERR(ibcq)) {
		ehca_err(&shca->ib_device, "Cannot create AQP1 CQ.");
		return PTR_ERR(ibcq);
	}
	sport->ibcq_aqp1 = ibcq;

	if (sport->ibqp_aqp1) {
		ehca_err(&shca->ib_device, "AQP1 QP is already created.");
		ret = -EPERM;
		goto create_aqp1;
	}

	memset(&qp_init_attr, 0, sizeof(struct ib_qp_init_attr));
	qp_init_attr.send_cq          = ibcq;
	qp_init_attr.recv_cq          = ibcq;
	qp_init_attr.sq_sig_type      = IB_SIGNAL_ALL_WR;
	qp_init_attr.cap.max_send_wr  = 100;
	qp_init_attr.cap.max_recv_wr  = 100;
	qp_init_attr.cap.max_send_sge = 2;
	qp_init_attr.cap.max_recv_sge = 1;
	qp_init_attr.qp_type          = IB_QPT_GSI;
	qp_init_attr.port_num         = port;
	qp_init_attr.qp_context       = NULL;
	qp_init_attr.event_handler    = NULL;
	qp_init_attr.srq              = NULL;

	ibqp = ib_create_qp(&shca->pd->ib_pd, &qp_init_attr);
	if (IS_ERR(ibqp)) {
		ehca_err(&shca->ib_device, "Cannot create AQP1 QP.");
		ret = PTR_ERR(ibqp);
		goto create_aqp1;
	}
	sport->ibqp_aqp1 = ibqp;

	return 0;

create_aqp1:
	ib_destroy_cq(sport->ibcq_aqp1);
	return ret;
}
static int ehca_destroy_aqp1(struct ehca_sport *sport)
{
	int ret;

	ret = ib_destroy_qp(sport->ibqp_aqp1);
	if (ret) {
		ehca_gen_err("Cannot destroy AQP1 QP. ret=%x", ret);
		return ret;
	}

	ret = ib_destroy_cq(sport->ibcq_aqp1);
	if (ret)
		ehca_gen_err("Cannot destroy AQP1 CQ. ret=%x", ret);

	return ret;
}
static ssize_t ehca_show_debug_level(struct device_driver *ddp, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n", ehca_debug_level);
}

static ssize_t ehca_store_debug_level(struct device_driver *ddp,
				      const char *buf, size_t count)
{
	int value = (*buf) - '0';
	if (value >= 0 && value <= 9)
		ehca_debug_level = value;
	return 1;
}

DRIVER_ATTR(debug_level, S_IRUSR | S_IWUSR,
	    ehca_show_debug_level, ehca_store_debug_level);
static struct attribute *ehca_drv_attrs[] = {
	&driver_attr_debug_level.attr,
	NULL
};

static struct attribute_group ehca_drv_attr_grp = {
	.attrs = ehca_drv_attrs
};
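
/*
 * Generate a read-only sysfs attribute that reports one field of the
 * firmware query block (e.g. num_ports, max_qp) for the device.
 */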
#define EHCA_RESOURCE_ATTR(name)                                            \
static ssize_t ehca_show_##name(struct device *dev,                        \
				struct device_attribute *attr,              \
				char *buf)                                  \
{                                                                           \
	struct ehca_shca *shca;                                             \
	struct hipz_query_hca *rblock;                                      \
	int data;                                                           \
									    \
	shca = dev->driver_data;                                            \
									    \
	rblock = ehca_alloc_fw_ctrlblock(GFP_KERNEL);                       \
	if (!rblock) {                                                      \
		dev_err(dev, "Can't allocate rblock memory.");              \
		return 0;                                                   \
	}                                                                   \
									    \
	if (hipz_h_query_hca(shca->ipz_hca_handle, rblock) != H_SUCCESS) { \
		dev_err(dev, "Can't query device properties");              \
		ehca_free_fw_ctrlblock(rblock);                             \
		return 0;                                                   \
	}                                                                   \
									    \
	data = rblock->name;                                                \
	ehca_free_fw_ctrlblock(rblock);                                     \
									    \
	if ((strcmp(#name, "num_ports") == 0) && (ehca_nr_ports == 1))      \
		return snprintf(buf, 256, "1\n");                           \
	else                                                                \
		return snprintf(buf, 256, "%d\n", data);                    \
}                                                                           \
static DEVICE_ATTR(name, S_IRUGO, ehca_show_##name, NULL);
EHCA_RESOURCE_ATTR(num_ports);
EHCA_RESOURCE_ATTR(hw_ver);
EHCA_RESOURCE_ATTR(max_eq);
EHCA_RESOURCE_ATTR(cur_eq);
EHCA_RESOURCE_ATTR(max_cq);
EHCA_RESOURCE_ATTR(cur_cq);
EHCA_RESOURCE_ATTR(max_qp);
EHCA_RESOURCE_ATTR(cur_qp);
EHCA_RESOURCE_ATTR(max_mr);
EHCA_RESOURCE_ATTR(cur_mr);
EHCA_RESOURCE_ATTR(max_mw);
EHCA_RESOURCE_ATTR(cur_mw);
EHCA_RESOURCE_ATTR(max_pd);
EHCA_RESOURCE_ATTR(max_ah);
static ssize_t ehca_show_adapter_handle(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct ehca_shca *shca = dev->driver_data;

	return sprintf(buf, "%lx\n", shca->ipz_hca_handle.handle);
}
static DEVICE_ATTR(adapter_handle, S_IRUGO, ehca_show_adapter_handle, NULL);
static struct attribute *ehca_dev_attrs[] = {
	&dev_attr_adapter_handle.attr,
	&dev_attr_num_ports.attr,
	&dev_attr_hw_ver.attr,
	&dev_attr_max_eq.attr,
	&dev_attr_cur_eq.attr,
	&dev_attr_max_cq.attr,
	&dev_attr_cur_cq.attr,
	&dev_attr_max_qp.attr,
	&dev_attr_cur_qp.attr,
	&dev_attr_max_mr.attr,
	&dev_attr_cur_mr.attr,
	&dev_attr_max_mw.attr,
	&dev_attr_cur_mw.attr,
	&dev_attr_max_pd.attr,
	&dev_attr_max_ah.attr,
	NULL
};

static struct attribute_group ehca_dev_attr_grp = {
	.attrs = ehca_dev_attrs
};
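
/*
 * Adapter probe: read the HCA handle from the device tree node, allocate
 * the shca, sense the adapter attributes, create the event queues, the
 * internal PD and max MR, register the IB device and, if requested via
 * open_aqp1, create AQP1 on the available ports.
 */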
static int __devinit ehca_probe(struct ibmebus_dev *dev,
				const struct of_device_id *id)
{
	struct ehca_shca *shca;
	const u64 *handle;
	struct ib_pd *ibpd;
	int ret;

	handle = of_get_property(dev->ofdev.node, "ibm,hca-handle", NULL);
	if (!handle) {
		ehca_gen_err("Cannot get eHCA handle for adapter: %s.",
			     dev->ofdev.node->full_name);
		return -ENODEV;
	}

	if (!(*handle)) {
		ehca_gen_err("Wrong eHCA handle for adapter: %s.",
			     dev->ofdev.node->full_name);
		return -ENODEV;
	}

	shca = (struct ehca_shca *)ib_alloc_device(sizeof(*shca));
	if (!shca) {
		ehca_gen_err("Cannot allocate shca memory.");
		return -ENOMEM;
	}
	mutex_init(&shca->modify_mutex);

	shca->ibmebus_dev = dev;
	shca->ipz_hca_handle.handle = *handle;
	dev->ofdev.dev.driver_data = shca;

	ret = ehca_sense_attributes(shca);
	if (ret < 0) {
		ehca_gen_err("Cannot sense eHCA attributes.");
		goto probe1;
	}

	ret = ehca_init_device(shca);
	if (ret) {
		ehca_gen_err("Cannot init ehca device struct");
		goto probe1;
	}

	/* create event queues */
	ret = ehca_create_eq(shca, &shca->eq, EHCA_EQ, 2048);
	if (ret) {
		ehca_err(&shca->ib_device, "Cannot create EQ.");
		goto probe1;
	}

	ret = ehca_create_eq(shca, &shca->neq, EHCA_NEQ, 513);
	if (ret) {
		ehca_err(&shca->ib_device, "Cannot create NEQ.");
		goto probe3;
	}

	/* create internal protection domain */
	ibpd = ehca_alloc_pd(&shca->ib_device, (void *)(-1), NULL);
	if (IS_ERR(ibpd)) {
		ehca_err(&shca->ib_device, "Cannot create internal PD.");
		ret = PTR_ERR(ibpd);
		goto probe4;
	}

	shca->pd = container_of(ibpd, struct ehca_pd, ib_pd);
	shca->pd->ib_pd.device = &shca->ib_device;

	/* create internal max MR */
	ret = ehca_reg_internal_maxmr(shca, shca->pd, &shca->maxmr);
	if (ret) {
		ehca_err(&shca->ib_device, "Cannot create internal MR ret=%x",
			 ret);
		goto probe5;
	}

	ret = ib_register_device(&shca->ib_device);
	if (ret) {
		ehca_err(&shca->ib_device,
			 "ib_register_device() failed ret=%x", ret);
		goto probe6;
	}

	/* create AQP1 for port 1 */
	if (ehca_open_aqp1 == 1) {
		shca->sport[0].port_state = IB_PORT_DOWN;
		ret = ehca_create_aqp1(shca, 1);
		if (ret) {
			ehca_err(&shca->ib_device,
				 "Cannot create AQP1 for port 1.");
			goto probe7;
		}
	}

	/* create AQP1 for port 2 */
	if ((ehca_open_aqp1 == 1) && (shca->num_ports == 2)) {
		shca->sport[1].port_state = IB_PORT_DOWN;
		ret = ehca_create_aqp1(shca, 2);
		if (ret) {
			ehca_err(&shca->ib_device,
				 "Cannot create AQP1 for port 2.");
			goto probe8;
		}
	}

	ret = sysfs_create_group(&dev->ofdev.dev.kobj, &ehca_dev_attr_grp);
	if (ret) /* only complain; we can live without attributes */
		ehca_err(&shca->ib_device,
			 "Cannot create device attributes ret=%d", ret);

	spin_lock(&shca_list_lock);
	list_add(&shca->shca_list, &shca_list);
	spin_unlock(&shca_list_lock);

	return 0;

probe8:
	ret = ehca_destroy_aqp1(&shca->sport[0]);
	if (ret)
		ehca_err(&shca->ib_device,
			 "Cannot destroy AQP1 for port 1. ret=%x", ret);

probe7:
	ib_unregister_device(&shca->ib_device);

probe6:
	ret = ehca_dereg_internal_maxmr(shca);
	if (ret)
		ehca_err(&shca->ib_device,
			 "Cannot destroy internal MR. ret=%x", ret);

probe5:
	ret = ehca_dealloc_pd(&shca->pd->ib_pd);
	if (ret)
		ehca_err(&shca->ib_device,
			 "Cannot destroy internal PD. ret=%x", ret);

probe4:
	ret = ehca_destroy_eq(shca, &shca->neq);
	if (ret)
		ehca_err(&shca->ib_device,
			 "Cannot destroy NEQ. ret=%x", ret);

probe3:
	ret = ehca_destroy_eq(shca, &shca->eq);
	if (ret)
		ehca_err(&shca->ib_device,
			 "Cannot destroy EQ. ret=%x", ret);

probe1:
	ib_dealloc_device(&shca->ib_device);

	return -EINVAL;
}
static int __devexit ehca_remove(struct ibmebus_dev *dev)
{
	struct ehca_shca *shca = dev->ofdev.dev.driver_data;
	int i, ret;

	sysfs_remove_group(&dev->ofdev.dev.kobj, &ehca_dev_attr_grp);

	if (ehca_open_aqp1 == 1) {
		for (i = 0; i < shca->num_ports; i++) {
			ret = ehca_destroy_aqp1(&shca->sport[i]);
			if (ret)
				ehca_err(&shca->ib_device,
					 "Cannot destroy AQP1 for port %x "
					 "ret=%x", i, ret);
		}
	}

	ib_unregister_device(&shca->ib_device);

	ret = ehca_dereg_internal_maxmr(shca);
	if (ret)
		ehca_err(&shca->ib_device,
			 "Cannot destroy internal MR. ret=%x", ret);

	ret = ehca_dealloc_pd(&shca->pd->ib_pd);
	if (ret)
		ehca_err(&shca->ib_device,
			 "Cannot destroy internal PD. ret=%x", ret);

	ret = ehca_destroy_eq(shca, &shca->eq);
	if (ret)
		ehca_err(&shca->ib_device, "Cannot destroy EQ. ret=%x", ret);

	ret = ehca_destroy_eq(shca, &shca->neq);
	if (ret)
		ehca_err(&shca->ib_device, "Cannot destroy NEQ. ret=%x", ret);

	ib_dealloc_device(&shca->ib_device);

	spin_lock(&shca_list_lock);
	list_del(&shca->shca_list);
	spin_unlock(&shca_list_lock);

	return 0;
}
static struct of_device_id ehca_device_table[] =
{
	{
		.name       = "lhca",
		.compatible = "IBM,lhca",
	},
	{},
};

static struct ibmebus_driver ehca_driver = {
	.name     = "ehca",
	.id_table = ehca_device_table,
	.probe    = ehca_probe,
	.remove   = ehca_remove,
};
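
/*
 * Deadman timer callback, re-armed once per second: if an EQ's queue
 * offset does not advance between samples, poll it via ehca_process_eq()
 * so that events are not lost when an interrupt goes missing.
 */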
void ehca_poll_eqs(unsigned long data)
{
	struct ehca_shca *shca;

	spin_lock(&shca_list_lock);
	list_for_each_entry(shca, &shca_list, shca_list) {
		if (shca->eq.is_initialized) {
			/* call deadman proc only if eq ptr does not change */
			struct ehca_eq *eq = &shca->eq;
			int max = 3;
			volatile u64 q_ofs, q_ofs2;
			unsigned long flags;
			spin_lock_irqsave(&eq->spinlock, flags);
			q_ofs = eq->ipz_queue.current_q_offset;
			spin_unlock_irqrestore(&eq->spinlock, flags);
			do {
				spin_lock_irqsave(&eq->spinlock, flags);
				q_ofs2 = eq->ipz_queue.current_q_offset;
				spin_unlock_irqrestore(&eq->spinlock, flags);
				max--;
			} while (q_ofs == q_ofs2 && max > 0);
			if (q_ofs == q_ofs2)
				ehca_process_eq(shca, 0);
		}
	}
	mod_timer(&poll_eqs_timer, jiffies + HZ);
	spin_unlock(&shca_list_lock);
}
int __init ehca_module_init(void)
{
	int ret;

	printk(KERN_INFO "eHCA Infiniband Device Driver "
	       "(Rel.: SVNEHCA_0023)\n");

	idr_init(&ehca_qp_idr);
	idr_init(&ehca_cq_idr);
	spin_lock_init(&ehca_qp_idr_lock);
	spin_lock_init(&ehca_cq_idr_lock);
	spin_lock_init(&hcall_lock);

	INIT_LIST_HEAD(&shca_list);
	spin_lock_init(&shca_list_lock);

	if ((ret = ehca_create_comp_pool())) {
		ehca_gen_err("Cannot create comp pool.");
		return ret;
	}

	if ((ret = ehca_create_slab_caches())) {
		ehca_gen_err("Cannot create SLAB caches");
		ret = -ENOMEM;
		goto module_init1;
	}

	if ((ret = ibmebus_register_driver(&ehca_driver))) {
		ehca_gen_err("Cannot register eHCA device driver");
		ret = -EINVAL;
		goto module_init2;
	}

	ret = sysfs_create_group(&ehca_driver.driver.kobj, &ehca_drv_attr_grp);
	if (ret) /* only complain; we can live without attributes */
		ehca_gen_err("Cannot create driver attributes ret=%d", ret);

	if (ehca_poll_all_eqs != 1) {
		ehca_gen_err("WARNING!!!");
		ehca_gen_err("It is possible to lose interrupts.");
	} else {
		init_timer(&poll_eqs_timer);
		poll_eqs_timer.function = ehca_poll_eqs;
		poll_eqs_timer.expires = jiffies + HZ;
		add_timer(&poll_eqs_timer);
	}

	return 0;

module_init2:
	ehca_destroy_slab_caches();

module_init1:
	ehca_destroy_comp_pool();
	return ret;
}
void __exit ehca_module_exit(void)
{
	if (ehca_poll_all_eqs == 1)
		del_timer_sync(&poll_eqs_timer);

	sysfs_remove_group(&ehca_driver.driver.kobj, &ehca_drv_attr_grp);
	ibmebus_unregister_driver(&ehca_driver);

	ehca_destroy_slab_caches();

	ehca_destroy_comp_pool();

	idr_destroy(&ehca_cq_idr);
	idr_destroy(&ehca_qp_idr);
}
module_init(ehca_module_init);
module_exit(ehca_module_exit);