Committer: Michael Beasley <mike@snafu.setup>
[mikesnafu-overlay.git] / drivers / infiniband / hw / mthca / mthca_av.c
blob4b111a852ff65b83b8479e6c3f6ce7fc79cb1ffe
1 /*
2 * Copyright (c) 2004 Topspin Communications. All rights reserved.
3 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
33 * $Id: mthca_av.c 1349 2004-12-16 21:09:43Z roland $
36 #include <linux/string.h>
37 #include <linux/slab.h>
39 #include <rdma/ib_verbs.h>
40 #include <rdma/ib_cache.h>
42 #include "mthca_dev.h"
/* Static-rate encodings understood by Tavor (MT23108) hardware. */
enum {
	MTHCA_RATE_TAVOR_FULL   = 0,
	MTHCA_RATE_TAVOR_1X     = 1,
	MTHCA_RATE_TAVOR_4X     = 2,
	MTHCA_RATE_TAVOR_1X_DDR = 3
};
/*
 * Static-rate encodings understood by mem-free (Arbel) hardware:
 * expressed as a fraction of the port's current rate.
 */
enum {
	MTHCA_RATE_MEMFREE_FULL    = 0,
	MTHCA_RATE_MEMFREE_QUARTER = 1,
	MTHCA_RATE_MEMFREE_EIGHTH  = 2,
	MTHCA_RATE_MEMFREE_HALF    = 3
};
58 struct mthca_av {
59 __be32 port_pd;
60 u8 reserved1;
61 u8 g_slid;
62 __be16 dlid;
63 u8 reserved2;
64 u8 gid_index;
65 u8 msg_sr;
66 u8 hop_limit;
67 __be32 sl_tclass_flowlabel;
68 __be32 dgid[4];
71 static enum ib_rate memfree_rate_to_ib(u8 mthca_rate, u8 port_rate)
73 switch (mthca_rate) {
74 case MTHCA_RATE_MEMFREE_EIGHTH:
75 return mult_to_ib_rate(port_rate >> 3);
76 case MTHCA_RATE_MEMFREE_QUARTER:
77 return mult_to_ib_rate(port_rate >> 2);
78 case MTHCA_RATE_MEMFREE_HALF:
79 return mult_to_ib_rate(port_rate >> 1);
80 case MTHCA_RATE_MEMFREE_FULL:
81 default:
82 return mult_to_ib_rate(port_rate);
86 static enum ib_rate tavor_rate_to_ib(u8 mthca_rate, u8 port_rate)
88 switch (mthca_rate) {
89 case MTHCA_RATE_TAVOR_1X: return IB_RATE_2_5_GBPS;
90 case MTHCA_RATE_TAVOR_1X_DDR: return IB_RATE_5_GBPS;
91 case MTHCA_RATE_TAVOR_4X: return IB_RATE_10_GBPS;
92 default: return mult_to_ib_rate(port_rate);
96 enum ib_rate mthca_rate_to_ib(struct mthca_dev *dev, u8 mthca_rate, u8 port)
98 if (mthca_is_memfree(dev)) {
99 /* Handle old Arbel FW */
100 if (dev->limits.stat_rate_support == 0x3 && mthca_rate)
101 return IB_RATE_2_5_GBPS;
103 return memfree_rate_to_ib(mthca_rate, dev->rate[port - 1]);
104 } else
105 return tavor_rate_to_ib(mthca_rate, dev->rate[port - 1]);
108 static u8 ib_rate_to_memfree(u8 req_rate, u8 cur_rate)
110 if (cur_rate <= req_rate)
111 return 0;
114 * Inter-packet delay (IPD) to get from rate X down to a rate
115 * no more than Y is (X - 1) / Y.
117 switch ((cur_rate - 1) / req_rate) {
118 case 0: return MTHCA_RATE_MEMFREE_FULL;
119 case 1: return MTHCA_RATE_MEMFREE_HALF;
120 case 2: /* fall through */
121 case 3: return MTHCA_RATE_MEMFREE_QUARTER;
122 default: return MTHCA_RATE_MEMFREE_EIGHTH;
126 static u8 ib_rate_to_tavor(u8 static_rate)
128 switch (static_rate) {
129 case IB_RATE_2_5_GBPS: return MTHCA_RATE_TAVOR_1X;
130 case IB_RATE_5_GBPS: return MTHCA_RATE_TAVOR_1X_DDR;
131 case IB_RATE_10_GBPS: return MTHCA_RATE_TAVOR_4X;
132 default: return MTHCA_RATE_TAVOR_FULL;
136 u8 mthca_get_rate(struct mthca_dev *dev, int static_rate, u8 port)
138 u8 rate;
140 if (!static_rate || ib_rate_to_mult(static_rate) >= dev->rate[port - 1])
141 return 0;
143 if (mthca_is_memfree(dev))
144 rate = ib_rate_to_memfree(ib_rate_to_mult(static_rate),
145 dev->rate[port - 1]);
146 else
147 rate = ib_rate_to_tavor(static_rate);
149 if (!(dev->limits.stat_rate_support & (1 << rate)))
150 rate = 1;
152 return rate;
155 int mthca_create_ah(struct mthca_dev *dev,
156 struct mthca_pd *pd,
157 struct ib_ah_attr *ah_attr,
158 struct mthca_ah *ah)
160 u32 index = -1;
161 struct mthca_av *av = NULL;
163 ah->type = MTHCA_AH_PCI_POOL;
165 if (mthca_is_memfree(dev)) {
166 ah->av = kmalloc(sizeof *ah->av, GFP_ATOMIC);
167 if (!ah->av)
168 return -ENOMEM;
170 ah->type = MTHCA_AH_KMALLOC;
171 av = ah->av;
172 } else if (!atomic_read(&pd->sqp_count) &&
173 !(dev->mthca_flags & MTHCA_FLAG_DDR_HIDDEN)) {
174 index = mthca_alloc(&dev->av_table.alloc);
176 /* fall back to allocate in host memory */
177 if (index == -1)
178 goto on_hca_fail;
180 av = kmalloc(sizeof *av, GFP_ATOMIC);
181 if (!av)
182 goto on_hca_fail;
184 ah->type = MTHCA_AH_ON_HCA;
185 ah->avdma = dev->av_table.ddr_av_base +
186 index * MTHCA_AV_SIZE;
189 on_hca_fail:
190 if (ah->type == MTHCA_AH_PCI_POOL) {
191 ah->av = pci_pool_alloc(dev->av_table.pool,
192 GFP_ATOMIC, &ah->avdma);
193 if (!ah->av)
194 return -ENOMEM;
196 av = ah->av;
199 ah->key = pd->ntmr.ibmr.lkey;
201 memset(av, 0, MTHCA_AV_SIZE);
203 av->port_pd = cpu_to_be32(pd->pd_num | (ah_attr->port_num << 24));
204 av->g_slid = ah_attr->src_path_bits;
205 av->dlid = cpu_to_be16(ah_attr->dlid);
206 av->msg_sr = (3 << 4) | /* 2K message */
207 mthca_get_rate(dev, ah_attr->static_rate, ah_attr->port_num);
208 av->sl_tclass_flowlabel = cpu_to_be32(ah_attr->sl << 28);
209 if (ah_attr->ah_flags & IB_AH_GRH) {
210 av->g_slid |= 0x80;
211 av->gid_index = (ah_attr->port_num - 1) * dev->limits.gid_table_len +
212 ah_attr->grh.sgid_index;
213 av->hop_limit = ah_attr->grh.hop_limit;
214 av->sl_tclass_flowlabel |=
215 cpu_to_be32((ah_attr->grh.traffic_class << 20) |
216 ah_attr->grh.flow_label);
217 memcpy(av->dgid, ah_attr->grh.dgid.raw, 16);
218 } else {
219 /* Arbel workaround -- low byte of GID must be 2 */
220 av->dgid[3] = cpu_to_be32(2);
223 if (0) {
224 int j;
226 mthca_dbg(dev, "Created UDAV at %p/%08lx:\n",
227 av, (unsigned long) ah->avdma);
228 for (j = 0; j < 8; ++j)
229 printk(KERN_DEBUG " [%2x] %08x\n",
230 j * 4, be32_to_cpu(((__be32 *) av)[j]));
233 if (ah->type == MTHCA_AH_ON_HCA) {
234 memcpy_toio(dev->av_table.av_map + index * MTHCA_AV_SIZE,
235 av, MTHCA_AV_SIZE);
236 kfree(av);
239 return 0;
242 int mthca_destroy_ah(struct mthca_dev *dev, struct mthca_ah *ah)
244 switch (ah->type) {
245 case MTHCA_AH_ON_HCA:
246 mthca_free(&dev->av_table.alloc,
247 (ah->avdma - dev->av_table.ddr_av_base) /
248 MTHCA_AV_SIZE);
249 break;
251 case MTHCA_AH_PCI_POOL:
252 pci_pool_free(dev->av_table.pool, ah->av, ah->avdma);
253 break;
255 case MTHCA_AH_KMALLOC:
256 kfree(ah->av);
257 break;
260 return 0;
263 int mthca_ah_grh_present(struct mthca_ah *ah)
265 return !!(ah->av->g_slid & 0x80);
268 int mthca_read_ah(struct mthca_dev *dev, struct mthca_ah *ah,
269 struct ib_ud_header *header)
271 if (ah->type == MTHCA_AH_ON_HCA)
272 return -EINVAL;
274 header->lrh.service_level = be32_to_cpu(ah->av->sl_tclass_flowlabel) >> 28;
275 header->lrh.destination_lid = ah->av->dlid;
276 header->lrh.source_lid = cpu_to_be16(ah->av->g_slid & 0x7f);
277 if (mthca_ah_grh_present(ah)) {
278 header->grh.traffic_class =
279 (be32_to_cpu(ah->av->sl_tclass_flowlabel) >> 20) & 0xff;
280 header->grh.flow_label =
281 ah->av->sl_tclass_flowlabel & cpu_to_be32(0xfffff);
282 header->grh.hop_limit = ah->av->hop_limit;
283 ib_get_cached_gid(&dev->ib_dev,
284 be32_to_cpu(ah->av->port_pd) >> 24,
285 ah->av->gid_index % dev->limits.gid_table_len,
286 &header->grh.source_gid);
287 memcpy(header->grh.destination_gid.raw,
288 ah->av->dgid, 16);
291 return 0;
294 int mthca_ah_query(struct ib_ah *ibah, struct ib_ah_attr *attr)
296 struct mthca_ah *ah = to_mah(ibah);
297 struct mthca_dev *dev = to_mdev(ibah->device);
299 /* Only implement for MAD and memfree ah for now. */
300 if (ah->type == MTHCA_AH_ON_HCA)
301 return -ENOSYS;
303 memset(attr, 0, sizeof *attr);
304 attr->dlid = be16_to_cpu(ah->av->dlid);
305 attr->sl = be32_to_cpu(ah->av->sl_tclass_flowlabel) >> 28;
306 attr->port_num = be32_to_cpu(ah->av->port_pd) >> 24;
307 attr->static_rate = mthca_rate_to_ib(dev, ah->av->msg_sr & 0x7,
308 attr->port_num);
309 attr->src_path_bits = ah->av->g_slid & 0x7F;
310 attr->ah_flags = mthca_ah_grh_present(ah) ? IB_AH_GRH : 0;
312 if (attr->ah_flags) {
313 attr->grh.traffic_class =
314 be32_to_cpu(ah->av->sl_tclass_flowlabel) >> 20;
315 attr->grh.flow_label =
316 be32_to_cpu(ah->av->sl_tclass_flowlabel) & 0xfffff;
317 attr->grh.hop_limit = ah->av->hop_limit;
318 attr->grh.sgid_index = ah->av->gid_index &
319 (dev->limits.gid_table_len - 1);
320 memcpy(attr->grh.dgid.raw, ah->av->dgid, 16);
323 return 0;
326 int mthca_init_av_table(struct mthca_dev *dev)
328 int err;
330 if (mthca_is_memfree(dev))
331 return 0;
333 err = mthca_alloc_init(&dev->av_table.alloc,
334 dev->av_table.num_ddr_avs,
335 dev->av_table.num_ddr_avs - 1,
337 if (err)
338 return err;
340 dev->av_table.pool = pci_pool_create("mthca_av", dev->pdev,
341 MTHCA_AV_SIZE,
342 MTHCA_AV_SIZE, 0);
343 if (!dev->av_table.pool)
344 goto out_free_alloc;
346 if (!(dev->mthca_flags & MTHCA_FLAG_DDR_HIDDEN)) {
347 dev->av_table.av_map = ioremap(pci_resource_start(dev->pdev, 4) +
348 dev->av_table.ddr_av_base -
349 dev->ddr_start,
350 dev->av_table.num_ddr_avs *
351 MTHCA_AV_SIZE);
352 if (!dev->av_table.av_map)
353 goto out_free_pool;
354 } else
355 dev->av_table.av_map = NULL;
357 return 0;
359 out_free_pool:
360 pci_pool_destroy(dev->av_table.pool);
362 out_free_alloc:
363 mthca_alloc_cleanup(&dev->av_table.alloc);
364 return -ENOMEM;
367 void mthca_cleanup_av_table(struct mthca_dev *dev)
369 if (mthca_is_memfree(dev))
370 return;
372 if (dev->av_table.av_map)
373 iounmap(dev->av_table.av_map);
374 pci_pool_destroy(dev->av_table.pool);
375 mthca_alloc_cleanup(&dev->av_table.alloc);