drivers/infiniband/hw/ehca/ehca_hca.c
/*
 * IBM eServer eHCA Infiniband device driver for Linux on POWER
 *
 * HCA query functions
 *
 * Authors: Heiko J Schick <schickhj@de.ibm.com>
 *          Christoph Raisch <raisch@de.ibm.com>
 *
 * Copyright (c) 2005 IBM Corporation
 *
 * All rights reserved.
 *
 * This source code is distributed under a dual license of GPL v2.0 and OpenIB
 * BSD.
 *
 * OpenIB BSD License
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "ehca_tools.h"
#include "ehca_iverbs.h"
#include "hcp_if.h"
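
/* query general device attributes: issue hipz_h_query_hca() and translate
 * the returned firmware control block into struct ib_device_attr */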
int ehca_query_device(struct ib_device *ibdev, struct ib_device_attr *props)
{
        int i, ret = 0;
        struct ehca_shca *shca = container_of(ibdev, struct ehca_shca,
                                              ib_device);
        struct hipz_query_hca *rblock;
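
        /* pairs of IB_DEVICE_* capability flag and the corresponding
         * HCA_CAP_* bit reported by firmware; used below to build
         * props->device_cap_flags */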
        static const u32 cap_mapping[] = {
                IB_DEVICE_RESIZE_MAX_WR,      HCA_CAP_WQE_RESIZE,
                IB_DEVICE_BAD_PKEY_CNTR,      HCA_CAP_BAD_P_KEY_CTR,
                IB_DEVICE_BAD_QKEY_CNTR,      HCA_CAP_Q_KEY_VIOL_CTR,
                IB_DEVICE_RAW_MULTI,          HCA_CAP_RAW_PACKET_MCAST,
                IB_DEVICE_AUTO_PATH_MIG,      HCA_CAP_AUTO_PATH_MIG,
                IB_DEVICE_CHANGE_PHY_PORT,    HCA_CAP_SQD_RTS_PORT_CHANGE,
                IB_DEVICE_UD_AV_PORT_ENFORCE, HCA_CAP_AH_PORT_NR_CHECK,
                IB_DEVICE_CURR_QP_STATE_MOD,  HCA_CAP_CUR_QP_STATE_MOD,
                IB_DEVICE_SHUTDOWN_PORT,      HCA_CAP_SHUTDOWN_PORT,
                IB_DEVICE_INIT_TYPE,          HCA_CAP_INIT_TYPE,
                IB_DEVICE_PORT_ACTIVE_EVENT,  HCA_CAP_PORT_ACTIVE_EVENT,
        };

        rblock = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
        if (!rblock) {
                ehca_err(&shca->ib_device, "Can't allocate rblock memory.");
                return -ENOMEM;
        }

        if (hipz_h_query_hca(shca->ipz_hca_handle, rblock) != H_SUCCESS) {
                ehca_err(&shca->ib_device, "Can't query device properties");
                ret = -EINVAL;
                goto query_device1;
        }

        memset(props, 0, sizeof(struct ib_device_attr));
        props->page_size_cap   = shca->hca_cap_mr_pgsize;
        props->fw_ver          = rblock->hw_ver;
        props->max_mr_size     = rblock->max_mr_size;
        props->vendor_id       = rblock->vendor_id >> 8;
        props->vendor_part_id  = rblock->vendor_part_id >> 16;
        props->hw_ver          = rblock->hw_ver;
        props->max_qp          = min_t(unsigned, rblock->max_qp, INT_MAX);
        props->max_qp_wr       = min_t(unsigned, rblock->max_wqes_wq, INT_MAX);
        props->max_sge         = min_t(unsigned, rblock->max_sge, INT_MAX);
        props->max_sge_rd      = min_t(unsigned, rblock->max_sge_rd, INT_MAX);
        props->max_cq          = min_t(unsigned, rblock->max_cq, INT_MAX);
        props->max_cqe         = min_t(unsigned, rblock->max_cqe, INT_MAX);
        props->max_mr          = min_t(unsigned, rblock->max_mr, INT_MAX);
        props->max_mw          = min_t(unsigned, rblock->max_mw, INT_MAX);
        props->max_pd          = min_t(unsigned, rblock->max_pd, INT_MAX);
        props->max_ah          = min_t(unsigned, rblock->max_ah, INT_MAX);
        props->max_fmr         = min_t(unsigned, rblock->max_mr, INT_MAX);

        if (EHCA_BMASK_GET(HCA_CAP_SRQ, shca->hca_cap)) {
                props->max_srq     = props->max_qp;
                props->max_srq_wr  = props->max_qp_wr;
                props->max_srq_sge = 3;
        }

        props->max_pkeys          = 16;
        props->local_ca_ack_delay = rblock->local_ca_ack_delay;
        props->max_raw_ipv6_qp
                = min_t(unsigned, rblock->max_raw_ipv6_qp, INT_MAX);
        props->max_raw_ethy_qp
                = min_t(unsigned, rblock->max_raw_ethy_qp, INT_MAX);
        props->max_mcast_grp
                = min_t(unsigned, rblock->max_mcast_grp, INT_MAX);
        props->max_mcast_qp_attach
                = min_t(unsigned, rblock->max_mcast_qp_attach, INT_MAX);
        props->max_total_mcast_qp_attach
                = min_t(unsigned, rblock->max_total_mcast_qp_attach, INT_MAX);

        /* translate device capabilities */
        props->device_cap_flags = IB_DEVICE_SYS_IMAGE_GUID |
                IB_DEVICE_RC_RNR_NAK_GEN | IB_DEVICE_N_NOTIFY_CQ;
        for (i = 0; i < ARRAY_SIZE(cap_mapping); i += 2)
                if (rblock->hca_cap_indicators & cap_mapping[i + 1])
                        props->device_cap_flags |= cap_mapping[i];

query_device1:
        ehca_free_fw_ctrlblock(rblock);

        return ret;
}
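
/* query per-port attributes (MTU, LIDs, counters, link state) via
 * hipz_h_query_port() and convert them to struct ib_port_attr */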
int ehca_query_port(struct ib_device *ibdev,
                    u8 port, struct ib_port_attr *props)
{
        int ret = 0;
        u64 h_ret;
        struct ehca_shca *shca = container_of(ibdev, struct ehca_shca,
                                              ib_device);
        struct hipz_query_port *rblock;

        rblock = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
        if (!rblock) {
                ehca_err(&shca->ib_device, "Can't allocate rblock memory.");
                return -ENOMEM;
        }

        h_ret = hipz_h_query_port(shca->ipz_hca_handle, port, rblock);
        if (h_ret != H_SUCCESS) {
                ehca_err(&shca->ib_device, "Can't query port properties");
                ret = -EINVAL;
                goto query_port1;
        }

        memset(props, 0, sizeof(struct ib_port_attr));

        switch (rblock->max_mtu) {
        case 0x1:
                props->active_mtu = props->max_mtu = IB_MTU_256;
                break;
        case 0x2:
                props->active_mtu = props->max_mtu = IB_MTU_512;
                break;
        case 0x3:
                props->active_mtu = props->max_mtu = IB_MTU_1024;
                break;
        case 0x4:
                props->active_mtu = props->max_mtu = IB_MTU_2048;
                break;
        case 0x5:
                props->active_mtu = props->max_mtu = IB_MTU_4096;
                break;
        default:
                ehca_err(&shca->ib_device, "Unknown MTU size: %x.",
                         rblock->max_mtu);
                break;
        }

        props->port_cap_flags  = rblock->capability_mask;
        props->gid_tbl_len     = rblock->gid_tbl_len;
        props->max_msg_sz      = rblock->max_msg_sz;
        props->bad_pkey_cntr   = rblock->bad_pkey_cntr;
        props->qkey_viol_cntr  = rblock->qkey_viol_cntr;
        props->pkey_tbl_len    = rblock->pkey_tbl_len;
        props->lid             = rblock->lid;
        props->sm_lid          = rblock->sm_lid;
        props->lmc             = rblock->lmc;
        props->sm_sl           = rblock->sm_sl;
        props->subnet_timeout  = rblock->subnet_timeout;
        props->init_type_reply = rblock->init_type_reply;

        if (rblock->state && rblock->phys_width) {
                props->phys_state   = rblock->phys_pstate;
                props->state        = rblock->phys_state;
                props->active_width = rblock->phys_width;
                props->active_speed = rblock->phys_speed;
        } else {
                /* old firmware releases don't report physical
                 * port info, so use default values
                 */
                props->phys_state   = 5;
                props->state        = rblock->state;
                props->active_width = IB_WIDTH_12X;
                props->active_speed = 0x1;
        }

query_port1:
        ehca_free_fw_ctrlblock(rblock);

        return ret;
}
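
/* fill a struct ehca_sma_attr (LID, LMC, SM SL/LID, P_Key table) from
 * the port data returned by hipz_h_query_port() */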
int ehca_query_sma_attr(struct ehca_shca *shca,
                        u8 port, struct ehca_sma_attr *attr)
{
        int ret = 0;
        u64 h_ret;
        struct hipz_query_port *rblock;

        rblock = ehca_alloc_fw_ctrlblock(GFP_ATOMIC);
        if (!rblock) {
                ehca_err(&shca->ib_device, "Can't allocate rblock memory.");
                return -ENOMEM;
        }

        h_ret = hipz_h_query_port(shca->ipz_hca_handle, port, rblock);
        if (h_ret != H_SUCCESS) {
                ehca_err(&shca->ib_device, "Can't query port properties");
                ret = -EINVAL;
                goto query_sma_attr1;
        }

        memset(attr, 0, sizeof(struct ehca_sma_attr));

        attr->lid    = rblock->lid;
        attr->lmc    = rblock->lmc;
        attr->sm_sl  = rblock->sm_sl;
        attr->sm_lid = rblock->sm_lid;

        attr->pkey_tbl_len = rblock->pkey_tbl_len;
        memcpy(attr->pkeys, rblock->pkey_entries, sizeof(attr->pkeys));

query_sma_attr1:
        ehca_free_fw_ctrlblock(rblock);

        return ret;
}
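
/* return the P_Key table entry at the given index for the given port */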
int ehca_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
{
        int ret = 0;
        u64 h_ret;
        struct ehca_shca *shca;
        struct hipz_query_port *rblock;

        shca = container_of(ibdev, struct ehca_shca, ib_device);
        if (index > 16) {
                ehca_err(&shca->ib_device, "Invalid index: %x.", index);
                return -EINVAL;
        }

        rblock = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
        if (!rblock) {
                ehca_err(&shca->ib_device, "Can't allocate rblock memory.");
                return -ENOMEM;
        }

        h_ret = hipz_h_query_port(shca->ipz_hca_handle, port, rblock);
        if (h_ret != H_SUCCESS) {
                ehca_err(&shca->ib_device, "Can't query port properties");
                ret = -EINVAL;
                goto query_pkey1;
        }

        memcpy(pkey, &rblock->pkey_entries + index, sizeof(u16));

query_pkey1:
        ehca_free_fw_ctrlblock(rblock);

        return ret;
}
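
/* build the GID from the port's GID prefix and the indexed GUID entry */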
int ehca_query_gid(struct ib_device *ibdev, u8 port,
                   int index, union ib_gid *gid)
{
        int ret = 0;
        u64 h_ret;
        struct ehca_shca *shca = container_of(ibdev, struct ehca_shca,
                                              ib_device);
        struct hipz_query_port *rblock;

        if (index > 255) {
                ehca_err(&shca->ib_device, "Invalid index: %x.", index);
                return -EINVAL;
        }

        rblock = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
        if (!rblock) {
                ehca_err(&shca->ib_device, "Can't allocate rblock memory.");
                return -ENOMEM;
        }

        h_ret = hipz_h_query_port(shca->ipz_hca_handle, port, rblock);
        if (h_ret != H_SUCCESS) {
                ehca_err(&shca->ib_device, "Can't query port properties");
                ret = -EINVAL;
                goto query_gid1;
        }

        memcpy(&gid->raw[0], &rblock->gid_prefix, sizeof(u64));
        memcpy(&gid->raw[8], &rblock->guid_entries[index], sizeof(u64));

query_gid1:
        ehca_free_fw_ctrlblock(rblock);

        return ret;
}
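
/* port capability bits that ehca_modify_port() allows to be set or
 * cleared; any other bit in the request masks is rejected */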
const u32 allowed_port_caps = (
        IB_PORT_SM | IB_PORT_LED_INFO_SUP | IB_PORT_CM_SUP |
        IB_PORT_SNMP_TUNNEL_SUP | IB_PORT_DEVICE_MGMT_SUP |
        IB_PORT_VENDOR_CLASS_SUP);

int ehca_modify_port(struct ib_device *ibdev,
                     u8 port, int port_modify_mask,
                     struct ib_port_modify *props)
{
        int ret = 0;
        struct ehca_shca *shca;
        struct hipz_query_port *rblock;
        u32 cap;
        u64 hret;

        shca = container_of(ibdev, struct ehca_shca, ib_device);
        if ((props->set_port_cap_mask | props->clr_port_cap_mask)
            & ~allowed_port_caps) {
                ehca_err(&shca->ib_device, "Non-changeable bits set in masks "
                         "set=%x clr=%x allowed=%x", props->set_port_cap_mask,
                         props->clr_port_cap_mask, allowed_port_caps);
                return -EINVAL;
        }

        if (mutex_lock_interruptible(&shca->modify_mutex))
                return -ERESTARTSYS;

        rblock = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
        if (!rblock) {
                ehca_err(&shca->ib_device, "Can't allocate rblock memory.");
                ret = -ENOMEM;
                goto modify_port1;
        }

        hret = hipz_h_query_port(shca->ipz_hca_handle, port, rblock);
        if (hret != H_SUCCESS) {
                ehca_err(&shca->ib_device, "Can't query port properties");
                ret = -EINVAL;
                goto modify_port2;
        }

        cap = (rblock->capability_mask | props->set_port_cap_mask)
                & ~props->clr_port_cap_mask;

        hret = hipz_h_modify_port(shca->ipz_hca_handle, port,
                                  cap, props->init_type, port_modify_mask);
        if (hret != H_SUCCESS) {
                ehca_err(&shca->ib_device, "Modify port failed h_ret=%li",
                         hret);
                ret = -EINVAL;
        }

modify_port2:
        ehca_free_fw_ctrlblock(rblock);

modify_port1:
        mutex_unlock(&shca->modify_mutex);

        return ret;
}