drivers/net/mlx4/main.c

/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc.  All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies.  All rights reserved.
 * Copyright (c) 2006, 2007 Cisco Systems, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>

#include <linux/mlx4/device.h>
#include <linux/mlx4/doorbell.h>

#include "mlx4.h"
#include "fw.h"
#include "icm.h"

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("Mellanox ConnectX HCA low-level driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);

#ifdef CONFIG_MLX4_DEBUG

int mlx4_debug_level = 0;
module_param_named(debug_level, mlx4_debug_level, int, 0644);
MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0");

#endif /* CONFIG_MLX4_DEBUG */

#ifdef CONFIG_PCI_MSI

static int msi_x = 1;
module_param(msi_x, int, 0444);
MODULE_PARM_DESC(msi_x, "attempt to use MSI-X if nonzero");

#else /* CONFIG_PCI_MSI */

#define msi_x (0)

#endif /* CONFIG_PCI_MSI */

static const char mlx4_version[] __devinitdata =
	DRV_NAME ": Mellanox ConnectX core driver v"
	DRV_VERSION " (" DRV_RELDATE ")\n";

static struct mlx4_profile default_profile = {
	.num_qp		= 1 << 16,
	.num_srq	= 1 << 16,
	.rdmarc_per_qp	= 1 << 4,
	.num_cq		= 1 << 16,
	.num_mcg	= 1 << 13,
	.num_mpt	= 1 << 17,
	.num_mtt	= 1 << 20,
};
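
/*
 * Query device capabilities from firmware, sanity-check them against
 * what the driver and kernel can support, and cache the resulting
 * limits in dev->caps.
 */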
static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
{
	int err;
	int i;

	err = mlx4_QUERY_DEV_CAP(dev, dev_cap);
	if (err) {
		mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n");
		return err;
	}

	if (dev_cap->min_page_sz > PAGE_SIZE) {
		mlx4_err(dev, "HCA minimum page size of %d bigger than "
			 "kernel PAGE_SIZE of %ld, aborting.\n",
			 dev_cap->min_page_sz, PAGE_SIZE);
		return -ENODEV;
	}
	if (dev_cap->num_ports > MLX4_MAX_PORTS) {
		mlx4_err(dev, "HCA has %d ports, but we only support %d, "
			 "aborting.\n",
			 dev_cap->num_ports, MLX4_MAX_PORTS);
		return -ENODEV;
	}

	if (dev_cap->uar_size > pci_resource_len(dev->pdev, 2)) {
		mlx4_err(dev, "HCA reported UAR size of 0x%x bigger than "
			 "PCI resource 2 size of 0x%llx, aborting.\n",
			 dev_cap->uar_size,
			 (unsigned long long) pci_resource_len(dev->pdev, 2));
		return -ENODEV;
	}

	dev->caps.num_ports = dev_cap->num_ports;
	for (i = 1; i <= dev->caps.num_ports; ++i) {
		dev->caps.vl_cap[i] = dev_cap->max_vl[i];
		dev->caps.mtu_cap[i] = dev_cap->max_mtu[i];
		dev->caps.gid_table_len[i] = dev_cap->max_gids[i];
		dev->caps.pkey_table_len[i] = dev_cap->max_pkeys[i];
		dev->caps.port_width_cap[i] = dev_cap->max_port_width[i];
	}

	dev->caps.num_uars = dev_cap->uar_size / PAGE_SIZE;
	dev->caps.local_ca_ack_delay = dev_cap->local_ca_ack_delay;
	dev->caps.bf_reg_size = dev_cap->bf_reg_size;
	dev->caps.bf_regs_per_page = dev_cap->bf_regs_per_page;
	dev->caps.max_sq_sg = dev_cap->max_sq_sg;
	dev->caps.max_rq_sg = dev_cap->max_rq_sg;
	dev->caps.max_wqes = dev_cap->max_qp_sz;
	dev->caps.max_qp_init_rdma = dev_cap->max_requester_per_qp;
	dev->caps.reserved_qps = dev_cap->reserved_qps;
	dev->caps.max_srq_wqes = dev_cap->max_srq_sz;
	dev->caps.max_srq_sge = dev_cap->max_rq_sg - 1;
	dev->caps.reserved_srqs = dev_cap->reserved_srqs;
	dev->caps.max_sq_desc_sz = dev_cap->max_sq_desc_sz;
	dev->caps.max_rq_desc_sz = dev_cap->max_rq_desc_sz;
	dev->caps.num_qp_per_mgm = MLX4_QP_PER_MGM;
	/*
	 * Subtract 1 from the limit because we need to allocate a
	 * spare CQE so the HCA HW can tell the difference between an
	 * empty CQ and a full CQ.
	 */
	dev->caps.max_cqes = dev_cap->max_cq_sz - 1;
	dev->caps.reserved_cqs = dev_cap->reserved_cqs;
	dev->caps.reserved_eqs = dev_cap->reserved_eqs;
	dev->caps.reserved_mtts = DIV_ROUND_UP(dev_cap->reserved_mtts,
					       MLX4_MTT_ENTRY_PER_SEG);
	dev->caps.reserved_mrws = dev_cap->reserved_mrws;
	dev->caps.reserved_uars = dev_cap->reserved_uars;
	dev->caps.reserved_pds = dev_cap->reserved_pds;
	dev->caps.mtt_entry_sz = MLX4_MTT_ENTRY_PER_SEG * dev_cap->mtt_entry_sz;
	dev->caps.max_msg_sz = dev_cap->max_msg_sz;
	dev->caps.page_size_cap = ~(u32) (dev_cap->min_page_sz - 1);
	dev->caps.flags = dev_cap->flags;
	dev->caps.stat_rate_support = dev_cap->stat_rate_support;

	return 0;
}
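
/*
 * Allocate ICM memory for the firmware image, map it into the HCA with
 * MAP_FA, and start the firmware with RUN_FW.
 */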
static int __devinit mlx4_load_fw(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int err;

	priv->fw.fw_icm = mlx4_alloc_icm(dev, priv->fw.fw_pages,
					 GFP_HIGHUSER | __GFP_NOWARN, 0);
	if (!priv->fw.fw_icm) {
		mlx4_err(dev, "Couldn't allocate FW area, aborting.\n");
		return -ENOMEM;
	}

	err = mlx4_MAP_FA(dev, priv->fw.fw_icm);
	if (err) {
		mlx4_err(dev, "MAP_FA command failed, aborting.\n");
		goto err_free;
	}

	err = mlx4_RUN_FW(dev);
	if (err) {
		mlx4_err(dev, "RUN_FW command failed, aborting.\n");
		goto err_unmap_fa;
	}

	return 0;

err_unmap_fa:
	mlx4_UNMAP_FA(dev);

err_free:
	mlx4_free_icm(dev, priv->fw.fw_icm, 0);
	return err;
}
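
/*
 * Set up the cMPT ICM tables for QPs, SRQs, CQs and EQs at their
 * offsets within the cMPT area, unwinding in reverse order on failure.
 */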
static int __devinit mlx4_init_cmpt_table(struct mlx4_dev *dev, u64 cmpt_base,
					  int cmpt_entry_sz)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int err;

	err = mlx4_init_icm_table(dev, &priv->qp_table.cmpt_table,
				  cmpt_base +
				  ((u64) (MLX4_CMPT_TYPE_QP *
					  cmpt_entry_sz) << MLX4_CMPT_SHIFT),
				  cmpt_entry_sz, dev->caps.num_qps,
				  dev->caps.reserved_qps, 0, 0);
	if (err)
		goto err;

	err = mlx4_init_icm_table(dev, &priv->srq_table.cmpt_table,
				  cmpt_base +
				  ((u64) (MLX4_CMPT_TYPE_SRQ *
					  cmpt_entry_sz) << MLX4_CMPT_SHIFT),
				  cmpt_entry_sz, dev->caps.num_srqs,
				  dev->caps.reserved_srqs, 0, 0);
	if (err)
		goto err_qp;

	err = mlx4_init_icm_table(dev, &priv->cq_table.cmpt_table,
				  cmpt_base +
				  ((u64) (MLX4_CMPT_TYPE_CQ *
					  cmpt_entry_sz) << MLX4_CMPT_SHIFT),
				  cmpt_entry_sz, dev->caps.num_cqs,
				  dev->caps.reserved_cqs, 0, 0);
	if (err)
		goto err_srq;

	err = mlx4_init_icm_table(dev, &priv->eq_table.cmpt_table,
				  cmpt_base +
				  ((u64) (MLX4_CMPT_TYPE_EQ *
					  cmpt_entry_sz) << MLX4_CMPT_SHIFT),
				  cmpt_entry_sz,
				  roundup_pow_of_two(MLX4_NUM_EQ +
						     dev->caps.reserved_eqs),
				  MLX4_NUM_EQ + dev->caps.reserved_eqs, 0, 0);
	if (err)
		goto err_cq;

	return 0;

err_cq:
	mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table);

err_srq:
	mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table);

err_qp:
	mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table);

err:
	return err;
}
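
/*
 * Size and map the ICM auxiliary area, then create the ICM tables that
 * back MTTs, MPTs, QP/CQ/SRQ context, RDMARC and the multicast group
 * table, tearing everything down again in reverse order on failure.
 */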
static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
			 struct mlx4_init_hca_param *init_hca, u64 icm_size)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	u64 aux_pages;
	int err;

	err = mlx4_SET_ICM_SIZE(dev, icm_size, &aux_pages);
	if (err) {
		mlx4_err(dev, "SET_ICM_SIZE command failed, aborting.\n");
		return err;
	}

	mlx4_dbg(dev, "%lld KB of HCA context requires %lld KB aux memory.\n",
		 (unsigned long long) icm_size >> 10,
		 (unsigned long long) aux_pages << 2);

	priv->fw.aux_icm = mlx4_alloc_icm(dev, aux_pages,
					  GFP_HIGHUSER | __GFP_NOWARN, 0);
	if (!priv->fw.aux_icm) {
		mlx4_err(dev, "Couldn't allocate aux memory, aborting.\n");
		return -ENOMEM;
	}

	err = mlx4_MAP_ICM_AUX(dev, priv->fw.aux_icm);
	if (err) {
		mlx4_err(dev, "MAP_ICM_AUX command failed, aborting.\n");
		goto err_free_aux;
	}

	err = mlx4_init_cmpt_table(dev, init_hca->cmpt_base, dev_cap->cmpt_entry_sz);
	if (err) {
		mlx4_err(dev, "Failed to map cMPT context memory, aborting.\n");
		goto err_unmap_aux;
	}

	err = mlx4_map_eq_icm(dev, init_hca->eqc_base);
	if (err) {
		mlx4_err(dev, "Failed to map EQ context memory, aborting.\n");
		goto err_unmap_cmpt;
	}

	/*
	 * Reserved MTT entries must be aligned up to a cacheline
	 * boundary, since the FW will write to them, while the driver
	 * writes to all other MTT entries. (The variable
	 * dev->caps.mtt_entry_sz below is really the MTT segment
	 * size, not the raw entry size)
	 */
	dev->caps.reserved_mtts =
		ALIGN(dev->caps.reserved_mtts * dev->caps.mtt_entry_sz,
		      dma_get_cache_alignment()) / dev->caps.mtt_entry_sz;

	err = mlx4_init_icm_table(dev, &priv->mr_table.mtt_table,
				  init_hca->mtt_base,
				  dev->caps.mtt_entry_sz,
				  dev->caps.num_mtt_segs,
				  dev->caps.reserved_mtts, 1, 0);
	if (err) {
		mlx4_err(dev, "Failed to map MTT context memory, aborting.\n");
		goto err_unmap_eq;
	}

	err = mlx4_init_icm_table(dev, &priv->mr_table.dmpt_table,
				  init_hca->dmpt_base,
				  dev_cap->dmpt_entry_sz,
				  dev->caps.num_mpts,
				  dev->caps.reserved_mrws, 1, 1);
	if (err) {
		mlx4_err(dev, "Failed to map dMPT context memory, aborting.\n");
		goto err_unmap_mtt;
	}

	err = mlx4_init_icm_table(dev, &priv->qp_table.qp_table,
				  init_hca->qpc_base,
				  dev_cap->qpc_entry_sz,
				  dev->caps.num_qps,
				  dev->caps.reserved_qps, 0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map QP context memory, aborting.\n");
		goto err_unmap_dmpt;
	}

	err = mlx4_init_icm_table(dev, &priv->qp_table.auxc_table,
				  init_hca->auxc_base,
				  dev_cap->aux_entry_sz,
				  dev->caps.num_qps,
				  dev->caps.reserved_qps, 0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map AUXC context memory, aborting.\n");
		goto err_unmap_qp;
	}

	err = mlx4_init_icm_table(dev, &priv->qp_table.altc_table,
				  init_hca->altc_base,
				  dev_cap->altc_entry_sz,
				  dev->caps.num_qps,
				  dev->caps.reserved_qps, 0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map ALTC context memory, aborting.\n");
		goto err_unmap_auxc;
	}

	err = mlx4_init_icm_table(dev, &priv->qp_table.rdmarc_table,
				  init_hca->rdmarc_base,
				  dev_cap->rdmarc_entry_sz << priv->qp_table.rdmarc_shift,
				  dev->caps.num_qps,
				  dev->caps.reserved_qps, 0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map RDMARC context memory, aborting\n");
		goto err_unmap_altc;
	}

	err = mlx4_init_icm_table(dev, &priv->cq_table.table,
				  init_hca->cqc_base,
				  dev_cap->cqc_entry_sz,
				  dev->caps.num_cqs,
				  dev->caps.reserved_cqs, 0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map CQ context memory, aborting.\n");
		goto err_unmap_rdmarc;
	}

	err = mlx4_init_icm_table(dev, &priv->srq_table.table,
				  init_hca->srqc_base,
				  dev_cap->srq_entry_sz,
				  dev->caps.num_srqs,
				  dev->caps.reserved_srqs, 0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map SRQ context memory, aborting.\n");
		goto err_unmap_cq;
	}

	/*
	 * It's not strictly required, but for simplicity just map the
	 * whole multicast group table now.  The table isn't very big
	 * and it's a lot easier than trying to track ref counts.
	 */
	err = mlx4_init_icm_table(dev, &priv->mcg_table.table,
				  init_hca->mc_base, MLX4_MGM_ENTRY_SIZE,
				  dev->caps.num_mgms + dev->caps.num_amgms,
				  dev->caps.num_mgms + dev->caps.num_amgms,
				  0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map MCG context memory, aborting.\n");
		goto err_unmap_srq;
	}

	return 0;

err_unmap_srq:
	mlx4_cleanup_icm_table(dev, &priv->srq_table.table);

err_unmap_cq:
	mlx4_cleanup_icm_table(dev, &priv->cq_table.table);

err_unmap_rdmarc:
	mlx4_cleanup_icm_table(dev, &priv->qp_table.rdmarc_table);

err_unmap_altc:
	mlx4_cleanup_icm_table(dev, &priv->qp_table.altc_table);

err_unmap_auxc:
	mlx4_cleanup_icm_table(dev, &priv->qp_table.auxc_table);

err_unmap_qp:
	mlx4_cleanup_icm_table(dev, &priv->qp_table.qp_table);

err_unmap_dmpt:
	mlx4_cleanup_icm_table(dev, &priv->mr_table.dmpt_table);

err_unmap_mtt:
	mlx4_cleanup_icm_table(dev, &priv->mr_table.mtt_table);

err_unmap_eq:
	mlx4_unmap_eq_icm(dev);

err_unmap_cmpt:
	mlx4_cleanup_icm_table(dev, &priv->eq_table.cmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table);

err_unmap_aux:
	mlx4_UNMAP_ICM_AUX(dev);

err_free_aux:
	mlx4_free_icm(dev, priv->fw.aux_icm, 0);

	return err;
}
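
/*
 * Tear down all ICM tables created by mlx4_init_icm() in reverse
 * order, then unmap and free the auxiliary ICM area.
 */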
static void mlx4_free_icms(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	mlx4_cleanup_icm_table(dev, &priv->mcg_table.table);
	mlx4_cleanup_icm_table(dev, &priv->srq_table.table);
	mlx4_cleanup_icm_table(dev, &priv->cq_table.table);
	mlx4_cleanup_icm_table(dev, &priv->qp_table.rdmarc_table);
	mlx4_cleanup_icm_table(dev, &priv->qp_table.altc_table);
	mlx4_cleanup_icm_table(dev, &priv->qp_table.auxc_table);
	mlx4_cleanup_icm_table(dev, &priv->qp_table.qp_table);
	mlx4_cleanup_icm_table(dev, &priv->mr_table.dmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->mr_table.mtt_table);
	mlx4_cleanup_icm_table(dev, &priv->eq_table.cmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table);
	mlx4_unmap_eq_icm(dev);

	mlx4_UNMAP_ICM_AUX(dev);
	mlx4_free_icm(dev, priv->fw.aux_icm, 0);
}
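
/*
 * Shut the HCA down: issue CLOSE_HCA, free all ICM, then unmap and
 * free the firmware area.
 */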
static void mlx4_close_hca(struct mlx4_dev *dev)
{
	mlx4_CLOSE_HCA(dev, 0);
	mlx4_free_icms(dev);
	mlx4_UNMAP_FA(dev);
	mlx4_free_icm(dev, mlx4_priv(dev)->fw.fw_icm, 0);
}
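
/*
 * Bring the HCA up: query and start firmware, query device
 * capabilities, build a resource profile, map ICM and issue INIT_HCA,
 * then read adapter information (INTA pin, revision, board id).
 */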
static int mlx4_init_hca(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_adapter adapter;
	struct mlx4_dev_cap dev_cap;
	struct mlx4_profile profile;
	struct mlx4_init_hca_param init_hca;
	u64 icm_size;
	int err;

	err = mlx4_QUERY_FW(dev);
	if (err) {
		mlx4_err(dev, "QUERY_FW command failed, aborting.\n");
		return err;
	}

	err = mlx4_load_fw(dev);
	if (err) {
		mlx4_err(dev, "Failed to start FW, aborting.\n");
		return err;
	}

	err = mlx4_dev_cap(dev, &dev_cap);
	if (err) {
		mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n");
		goto err_stop_fw;
	}

	profile = default_profile;

	icm_size = mlx4_make_profile(dev, &profile, &dev_cap, &init_hca);
	if ((long long) icm_size < 0) {
		err = icm_size;
		goto err_stop_fw;
	}

	init_hca.log_uar_sz = ilog2(dev->caps.num_uars);

	err = mlx4_init_icm(dev, &dev_cap, &init_hca, icm_size);
	if (err)
		goto err_stop_fw;

	err = mlx4_INIT_HCA(dev, &init_hca);
	if (err) {
		mlx4_err(dev, "INIT_HCA command failed, aborting.\n");
		goto err_free_icm;
	}

	err = mlx4_QUERY_ADAPTER(dev, &adapter);
	if (err) {
		mlx4_err(dev, "QUERY_ADAPTER command failed, aborting.\n");
		goto err_close;
	}

	priv->eq_table.inta_pin = adapter.inta_pin;
	dev->rev_id = adapter.revision_id;
	memcpy(dev->board_id, adapter.board_id, sizeof dev->board_id);

	return 0;

err_close:
	mlx4_close_hca(dev);

err_free_icm:
	mlx4_free_icms(dev);

err_stop_fw:
	mlx4_UNMAP_FA(dev);
	mlx4_free_icm(dev, priv->fw.fw_icm, 0);

	return err;
}
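
/*
 * Set up driver-side resources: UAR/PD/MR/EQ/CQ/SRQ/QP/MCG tables,
 * switch the command interface to event-driven mode, and verify
 * interrupt delivery with a NOP command before going further.
 */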
static int mlx4_setup_hca(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int err;

	err = mlx4_init_uar_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize "
			 "user access region table, aborting.\n");
		return err;
	}

	err = mlx4_uar_alloc(dev, &priv->driver_uar);
	if (err) {
		mlx4_err(dev, "Failed to allocate driver access region, "
			 "aborting.\n");
		goto err_uar_table_free;
	}

	priv->kar = ioremap(priv->driver_uar.pfn << PAGE_SHIFT, PAGE_SIZE);
	if (!priv->kar) {
		mlx4_err(dev, "Couldn't map kernel access region, "
			 "aborting.\n");
		err = -ENOMEM;
		goto err_uar_free;
	}

	err = mlx4_init_pd_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize "
			 "protection domain table, aborting.\n");
		goto err_kar_unmap;
	}

	err = mlx4_init_mr_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize "
			 "memory region table, aborting.\n");
		goto err_pd_table_free;
	}

	err = mlx4_init_eq_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize "
			 "event queue table, aborting.\n");
		goto err_mr_table_free;
	}

	err = mlx4_cmd_use_events(dev);
	if (err) {
		mlx4_err(dev, "Failed to switch to event-driven "
			 "firmware commands, aborting.\n");
		goto err_eq_table_free;
	}

	err = mlx4_NOP(dev);
	if (err) {
		if (dev->flags & MLX4_FLAG_MSI_X) {
			mlx4_warn(dev, "NOP command failed to generate MSI-X "
				  "interrupt (IRQ %d).\n",
				  priv->eq_table.eq[MLX4_EQ_ASYNC].irq);
			mlx4_warn(dev, "Trying again without MSI-X.\n");
		} else {
			mlx4_err(dev, "NOP command failed to generate interrupt "
				 "(IRQ %d), aborting.\n",
				 priv->eq_table.eq[MLX4_EQ_ASYNC].irq);
			mlx4_err(dev, "BIOS or ACPI interrupt routing problem?\n");
		}

		goto err_cmd_poll;
	}

	mlx4_dbg(dev, "NOP command IRQ test passed\n");

	err = mlx4_init_cq_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize "
			 "completion queue table, aborting.\n");
		goto err_cmd_poll;
	}

	err = mlx4_init_srq_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize "
			 "shared receive queue table, aborting.\n");
		goto err_cq_table_free;
	}

	err = mlx4_init_qp_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize "
			 "queue pair table, aborting.\n");
		goto err_srq_table_free;
	}

	err = mlx4_init_mcg_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize "
			 "multicast group table, aborting.\n");
		goto err_qp_table_free;
	}

	return 0;

err_qp_table_free:
	mlx4_cleanup_qp_table(dev);

err_srq_table_free:
	mlx4_cleanup_srq_table(dev);

err_cq_table_free:
	mlx4_cleanup_cq_table(dev);

err_cmd_poll:
	mlx4_cmd_use_polling(dev);

err_eq_table_free:
	mlx4_cleanup_eq_table(dev);

err_mr_table_free:
	mlx4_cleanup_mr_table(dev);

err_pd_table_free:
	mlx4_cleanup_pd_table(dev);

err_kar_unmap:
	iounmap(priv->kar);

err_uar_free:
	mlx4_uar_free(dev, &priv->driver_uar);

err_uar_table_free:
	mlx4_cleanup_uar_table(dev);
	return err;
}
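
/*
 * Try to enable one MSI-X vector per EQ; fall back to the legacy INTx
 * interrupt, shared by all EQs, if MSI-X is unavailable or disabled.
 */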
static void __devinit mlx4_enable_msi_x(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct msix_entry entries[MLX4_NUM_EQ];
	int err;
	int i;

	if (msi_x) {
		for (i = 0; i < MLX4_NUM_EQ; ++i)
			entries[i].entry = i;

		err = pci_enable_msix(dev->pdev, entries, ARRAY_SIZE(entries));
		if (err) {
			if (err > 0)
				mlx4_info(dev, "Only %d MSI-X vectors available, "
					  "not using MSI-X\n", err);
			goto no_msi;
		}

		for (i = 0; i < MLX4_NUM_EQ; ++i)
			priv->eq_table.eq[i].irq = entries[i].vector;

		dev->flags |= MLX4_FLAG_MSI_X;
		return;
	}

no_msi:
	for (i = 0; i < MLX4_NUM_EQ; ++i)
		priv->eq_table.eq[i].irq = dev->pdev->irq;
}
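
/*
 * Probe one ConnectX PCI function: enable and map the device, reset
 * the HCA, bring up the command interface, firmware and driver
 * resources, and register the device with the mlx4 interface layer.
 */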
static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct mlx4_priv *priv;
	struct mlx4_dev *dev;
	int err;

	printk(KERN_INFO PFX "Initializing %s\n",
	       pci_name(pdev));

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "Cannot enable PCI device, "
			"aborting.\n");
		return err;
	}

	/*
	 * Check for BARs.  We expect 0: 1MB, 2: 8MB, 4: DDR (may not
	 * be present)
	 */
	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM) ||
	    pci_resource_len(pdev, 0) != 1 << 20) {
		dev_err(&pdev->dev, "Missing DCS, aborting.\n");
		err = -ENODEV;
		goto err_disable_pdev;
	}
	if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
		dev_err(&pdev->dev, "Missing UAR, aborting.\n");
		err = -ENODEV;
		goto err_disable_pdev;
	}

	err = pci_request_region(pdev, 0, DRV_NAME);
	if (err) {
		dev_err(&pdev->dev, "Cannot request control region, aborting.\n");
		goto err_disable_pdev;
	}

	err = pci_request_region(pdev, 2, DRV_NAME);
	if (err) {
		dev_err(&pdev->dev, "Cannot request UAR region, aborting.\n");
		goto err_release_bar0;
	}

	pci_set_master(pdev);

	err = pci_set_dma_mask(pdev, DMA_64BIT_MASK);
	if (err) {
		dev_warn(&pdev->dev, "Warning: couldn't set 64-bit PCI DMA mask.\n");
		err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
		if (err) {
			dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting.\n");
			goto err_release_bar2;
		}
	}
	err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
	if (err) {
		dev_warn(&pdev->dev, "Warning: couldn't set 64-bit "
			 "consistent PCI DMA mask.\n");
		err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
		if (err) {
			dev_err(&pdev->dev, "Can't set consistent PCI DMA mask, "
				"aborting.\n");
			goto err_release_bar2;
		}
	}

	priv = kzalloc(sizeof *priv, GFP_KERNEL);
	if (!priv) {
		dev_err(&pdev->dev, "Device struct alloc failed, "
			"aborting.\n");
		err = -ENOMEM;
		goto err_release_bar2;
	}

	dev = &priv->dev;
	dev->pdev = pdev;
	INIT_LIST_HEAD(&priv->ctx_list);
	spin_lock_init(&priv->ctx_lock);

	/*
	 * Now reset the HCA before we touch the PCI capabilities or
	 * attempt a firmware command, since a boot ROM may have left
	 * the HCA in an undefined state.
	 */
	err = mlx4_reset(dev);
	if (err) {
		mlx4_err(dev, "Failed to reset HCA, aborting.\n");
		goto err_free_dev;
	}

	if (mlx4_cmd_init(dev)) {
		mlx4_err(dev, "Failed to init command interface, aborting.\n");
		goto err_free_dev;
	}

	err = mlx4_init_hca(dev);
	if (err)
		goto err_cmd;

	mlx4_enable_msi_x(dev);

	err = mlx4_setup_hca(dev);
	if (err == -EBUSY && (dev->flags & MLX4_FLAG_MSI_X)) {
		dev->flags &= ~MLX4_FLAG_MSI_X;
		pci_disable_msix(pdev);
		err = mlx4_setup_hca(dev);
	}

	if (err)
		goto err_close;

	err = mlx4_register_device(dev);
	if (err)
		goto err_cleanup;

	pci_set_drvdata(pdev, dev);

	return 0;

err_cleanup:
	mlx4_cleanup_mcg_table(dev);
	mlx4_cleanup_qp_table(dev);
	mlx4_cleanup_srq_table(dev);
	mlx4_cleanup_cq_table(dev);
	mlx4_cmd_use_polling(dev);
	mlx4_cleanup_eq_table(dev);
	mlx4_cleanup_mr_table(dev);
	mlx4_cleanup_pd_table(dev);
	mlx4_cleanup_uar_table(dev);

err_close:
	if (dev->flags & MLX4_FLAG_MSI_X)
		pci_disable_msix(pdev);

	mlx4_close_hca(dev);

err_cmd:
	mlx4_cmd_cleanup(dev);

err_free_dev:
	kfree(priv);

err_release_bar2:
	pci_release_region(pdev, 2);

err_release_bar0:
	pci_release_region(pdev, 0);

err_disable_pdev:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}
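
/*
 * Standard probe entry point: print the driver banner once, then hand
 * off to __mlx4_init_one().
 */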
static int __devinit mlx4_init_one(struct pci_dev *pdev,
				   const struct pci_device_id *id)
{
	static int mlx4_version_printed;

	if (!mlx4_version_printed) {
		printk(KERN_INFO "%s", mlx4_version);
		++mlx4_version_printed;
	}

	return __mlx4_init_one(pdev, id);
}
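
/*
 * Undo everything done in __mlx4_init_one(): unregister the device,
 * close all ports, tear down driver tables, shut down the HCA and
 * release the PCI resources.
 */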
static void mlx4_remove_one(struct pci_dev *pdev)
{
	struct mlx4_dev *dev = pci_get_drvdata(pdev);
	struct mlx4_priv *priv = mlx4_priv(dev);
	int p;

	if (dev) {
		mlx4_unregister_device(dev);

		for (p = 1; p <= dev->caps.num_ports; ++p)
			mlx4_CLOSE_PORT(dev, p);

		mlx4_cleanup_mcg_table(dev);
		mlx4_cleanup_qp_table(dev);
		mlx4_cleanup_srq_table(dev);
		mlx4_cleanup_cq_table(dev);
		mlx4_cmd_use_polling(dev);
		mlx4_cleanup_eq_table(dev);
		mlx4_cleanup_mr_table(dev);
		mlx4_cleanup_pd_table(dev);

		iounmap(priv->kar);
		mlx4_uar_free(dev, &priv->driver_uar);
		mlx4_cleanup_uar_table(dev);
		mlx4_close_hca(dev);
		mlx4_cmd_cleanup(dev);

		if (dev->flags & MLX4_FLAG_MSI_X)
			pci_disable_msix(pdev);

		kfree(priv);
		pci_release_region(pdev, 2);
		pci_release_region(pdev, 0);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
	}
}
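
/*
 * Restart a device in place by removing it and probing it again; used
 * by the catastrophic-error handling code to recover the HCA.
 */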
int mlx4_restart_one(struct pci_dev *pdev)
{
	mlx4_remove_one(pdev);
	return __mlx4_init_one(pdev, NULL);
}

static struct pci_device_id mlx4_pci_table[] = {
	{ PCI_VDEVICE(MELLANOX, 0x6340) }, /* MT25408 "Hermon" SDR */
	{ PCI_VDEVICE(MELLANOX, 0x634a) }, /* MT25408 "Hermon" DDR */
	{ PCI_VDEVICE(MELLANOX, 0x6354) }, /* MT25408 "Hermon" QDR */
	{ PCI_VDEVICE(MELLANOX, 0x6732) }, /* MT25408 "Hermon" DDR PCIe gen2 */
	{ PCI_VDEVICE(MELLANOX, 0x673c) }, /* MT25408 "Hermon" QDR PCIe gen2 */
	{ 0, }
};

MODULE_DEVICE_TABLE(pci, mlx4_pci_table);

static struct pci_driver mlx4_driver = {
	.name		= DRV_NAME,
	.id_table	= mlx4_pci_table,
	.probe		= mlx4_init_one,
	.remove		= __devexit_p(mlx4_remove_one)
};

static int __init mlx4_init(void)
{
	int ret;

	ret = mlx4_catas_init();
	if (ret)
		return ret;

	ret = pci_register_driver(&mlx4_driver);
	return ret < 0 ? ret : 0;
}

static void __exit mlx4_cleanup(void)
{
	pci_unregister_driver(&mlx4_driver);
	mlx4_catas_cleanup();
}

module_init(mlx4_init);
module_exit(mlx4_cleanup);