tomato.git: release/src-rt-6.x/linux/linux-2.6/drivers/infiniband/hw/ehca/ehca_av.c

/*
 *  IBM eServer eHCA Infiniband device driver for Linux on POWER
 *
 *  address vector functions
 *
 *  Authors: Hoang-Nam Nguyen <hnguyen@de.ibm.com>
 *           Khadija Souissi <souissik@de.ibm.com>
 *           Reinhard Ernst <rernst@de.ibm.com>
 *           Christoph Raisch <raisch@de.ibm.com>
 *
 *  Copyright (c) 2005 IBM Corporation
 *
 *  All rights reserved.
 *
 *  This source code is distributed under a dual license of GPL v2.0 and OpenIB
 *  BSD.
 *
 * OpenIB BSD License
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials
 * provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <asm/current.h>

#include "ehca_tools.h"
#include "ehca_iverbs.h"
#include "hcp_if.h"
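
/* slab cache for the ehca_av objects backing each address handle */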
static struct kmem_cache *av_cache;

struct ib_ah *ehca_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr)
{
        int ret;
        struct ehca_av *av;
        struct ehca_shca *shca = container_of(pd->device, struct ehca_shca,
                                              ib_device);

        av = kmem_cache_alloc(av_cache, GFP_KERNEL);
        if (!av) {
                ehca_err(pd->device, "Out of memory pd=%p ah_attr=%p",
                         pd, ah_attr);
                return ERR_PTR(-ENOMEM);
        }

        av->av.sl = ah_attr->sl;
        av->av.dlid = ah_attr->dlid;
        av->av.slid_path_bits = ah_attr->src_path_bits;
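
        /*
         * ipd encodes an inter-packet delay.  When no fixed value is forced
         * through the ehca_static_rate parameter (< 0), derive it from the
         * ratio between the port link rate and the requested static rate,
         * both converted to rate multiples by ib_rate_to_mult().
         */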
        if (ehca_static_rate < 0) {
                int ah_mult = ib_rate_to_mult(ah_attr->static_rate);
                int ehca_mult =
                        ib_rate_to_mult(shca->sport[ah_attr->port_num].rate);

                if (ah_mult >= ehca_mult)
                        av->av.ipd = 0;
                else
                        av->av.ipd = (ah_mult > 0) ?
                                ((ehca_mult - 1) / ah_mult) : 0;
        } else
                av->av.ipd = ehca_static_rate;

        av->av.lnh = ah_attr->ah_flags;
        av->av.grh.word_0 = EHCA_BMASK_SET(GRH_IPVERSION_MASK, 6);
        av->av.grh.word_0 |= EHCA_BMASK_SET(GRH_TCLASS_MASK,
                                            ah_attr->grh.traffic_class);
        av->av.grh.word_0 |= EHCA_BMASK_SET(GRH_FLOWLABEL_MASK,
                                            ah_attr->grh.flow_label);
        av->av.grh.word_0 |= EHCA_BMASK_SET(GRH_HOPLIMIT_MASK,
                                            ah_attr->grh.hop_limit);
        av->av.grh.word_0 |= EHCA_BMASK_SET(GRH_NEXTHEADER_MASK, 0x1B);
        /* set sgid in grh.word_1 */
        if (ah_attr->ah_flags & IB_AH_GRH) {
                int rc;
                struct ib_port_attr port_attr;
                union ib_gid gid;
                memset(&port_attr, 0, sizeof(port_attr));
                rc = ehca_query_port(pd->device, ah_attr->port_num,
                                     &port_attr);
                if (rc) { /* invalid port number */
                        ret = -EINVAL;
                        ehca_err(pd->device, "Invalid port number "
                                 "ehca_query_port() returned %x "
                                 "pd=%p ah_attr=%p", rc, pd, ah_attr);
                        goto create_ah_exit1;
                }
                memset(&gid, 0, sizeof(gid));
                rc = ehca_query_gid(pd->device,
                                    ah_attr->port_num,
                                    ah_attr->grh.sgid_index, &gid);
                if (rc) {
                        ret = -EINVAL;
                        ehca_err(pd->device, "Failed to retrieve sgid "
                                 "ehca_query_gid() returned %x "
                                 "pd=%p ah_attr=%p", rc, pd, ah_attr);
                        goto create_ah_exit1;
                }
                memcpy(&av->av.grh.word_1, &gid, sizeof(gid));
        }

        av->av.pmtu = EHCA_MAX_MTU;

        /* dgid comes in grh.word_3 */
        memcpy(&av->av.grh.word_3, &ah_attr->grh.dgid,
               sizeof(ah_attr->grh.dgid));

        return &av->ib_ah;

create_ah_exit1:
        kmem_cache_free(av_cache, av);

        return ERR_PTR(ret);
}

int ehca_modify_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr)
{
        struct ehca_av *av;
        struct ehca_ud_av new_ehca_av;
        struct ehca_pd *my_pd = container_of(ah->pd, struct ehca_pd, ib_pd);
        u32 cur_pid = current->tgid;
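
        /*
         * PDs created from user space are owned by the creating process
         * (my_pd->ownpid); reject calls coming from any other process.
         */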
        if (my_pd->ib_pd.uobject && my_pd->ib_pd.uobject->context &&
            my_pd->ownpid != cur_pid) {
                ehca_err(ah->device, "Invalid caller pid=%x ownpid=%x",
                         cur_pid, my_pd->ownpid);
                return -EINVAL;
        }

        memset(&new_ehca_av, 0, sizeof(new_ehca_av));
        new_ehca_av.sl = ah_attr->sl;
        new_ehca_av.dlid = ah_attr->dlid;
        new_ehca_av.slid_path_bits = ah_attr->src_path_bits;
        new_ehca_av.ipd = ah_attr->static_rate;
        new_ehca_av.lnh = EHCA_BMASK_SET(GRH_FLAG_MASK,
                                         (ah_attr->ah_flags & IB_AH_GRH) > 0);
        new_ehca_av.grh.word_0 = EHCA_BMASK_SET(GRH_TCLASS_MASK,
                                                ah_attr->grh.traffic_class);
        new_ehca_av.grh.word_0 |= EHCA_BMASK_SET(GRH_FLOWLABEL_MASK,
                                                 ah_attr->grh.flow_label);
        new_ehca_av.grh.word_0 |= EHCA_BMASK_SET(GRH_HOPLIMIT_MASK,
                                                 ah_attr->grh.hop_limit);
        new_ehca_av.grh.word_0 |= EHCA_BMASK_SET(GRH_NEXTHEADER_MASK, 0x1b);

        /* set sgid in grh.word_1 */
        if (ah_attr->ah_flags & IB_AH_GRH) {
                int rc;
                struct ib_port_attr port_attr;
                union ib_gid gid;
                memset(&port_attr, 0, sizeof(port_attr));
                rc = ehca_query_port(ah->device, ah_attr->port_num,
                                     &port_attr);
                if (rc) { /* invalid port number */
                        ehca_err(ah->device, "Invalid port number "
                                 "ehca_query_port() returned %x "
                                 "ah=%p ah_attr=%p port_num=%x",
                                 rc, ah, ah_attr, ah_attr->port_num);
                        return -EINVAL;
                }
                memset(&gid, 0, sizeof(gid));
                rc = ehca_query_gid(ah->device,
                                    ah_attr->port_num,
                                    ah_attr->grh.sgid_index, &gid);
                if (rc) {
                        ehca_err(ah->device, "Failed to retrieve sgid "
                                 "ehca_query_gid() returned %x "
                                 "ah=%p ah_attr=%p port_num=%x "
                                 "sgid_index=%x",
                                 rc, ah, ah_attr, ah_attr->port_num,
                                 ah_attr->grh.sgid_index);
                        return -EINVAL;
                }
                memcpy(&new_ehca_av.grh.word_1, &gid, sizeof(gid));
        }

        new_ehca_av.pmtu = EHCA_MAX_MTU;

        memcpy(&new_ehca_av.grh.word_3, &ah_attr->grh.dgid,
               sizeof(ah_attr->grh.dgid));

        av = container_of(ah, struct ehca_av, ib_ah);
        av->av = new_ehca_av;

        return 0;
}

int ehca_query_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr)
{
        struct ehca_av *av = container_of(ah, struct ehca_av, ib_ah);
        struct ehca_pd *my_pd = container_of(ah->pd, struct ehca_pd, ib_pd);
        u32 cur_pid = current->tgid;

        if (my_pd->ib_pd.uobject && my_pd->ib_pd.uobject->context &&
            my_pd->ownpid != cur_pid) {
                ehca_err(ah->device, "Invalid caller pid=%x ownpid=%x",
                         cur_pid, my_pd->ownpid);
                return -EINVAL;
        }

        memcpy(&ah_attr->grh.dgid, &av->av.grh.word_3,
               sizeof(ah_attr->grh.dgid));
        ah_attr->sl = av->av.sl;
        ah_attr->dlid = av->av.dlid;
        ah_attr->src_path_bits = av->av.slid_path_bits;
        ah_attr->static_rate = av->av.ipd;
        ah_attr->ah_flags = EHCA_BMASK_GET(GRH_FLAG_MASK, av->av.lnh);
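        /* unpack traffic class, hop limit and flow label from grh.word_0 */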
        ah_attr->grh.traffic_class = EHCA_BMASK_GET(GRH_TCLASS_MASK,
                                                    av->av.grh.word_0);
        ah_attr->grh.hop_limit = EHCA_BMASK_GET(GRH_HOPLIMIT_MASK,
                                                av->av.grh.word_0);
        ah_attr->grh.flow_label = EHCA_BMASK_GET(GRH_FLOWLABEL_MASK,
                                                 av->av.grh.word_0);

        return 0;
}

int ehca_destroy_ah(struct ib_ah *ah)
{
        struct ehca_pd *my_pd = container_of(ah->pd, struct ehca_pd, ib_pd);
        u32 cur_pid = current->tgid;

        if (my_pd->ib_pd.uobject && my_pd->ib_pd.uobject->context &&
            my_pd->ownpid != cur_pid) {
                ehca_err(ah->device, "Invalid caller pid=%x ownpid=%x",
                         cur_pid, my_pd->ownpid);
                return -EINVAL;
        }

        kmem_cache_free(av_cache, container_of(ah, struct ehca_av, ib_ah));

        return 0;
}
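
/*
 * Create/destroy the slab cache for address vectors.  This tree still uses
 * the older kmem_cache_create() signature that takes constructor and
 * destructor callbacks, hence the two trailing NULL arguments.
 */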
int ehca_init_av_cache(void)
{
        av_cache = kmem_cache_create("ehca_cache_av",
                                     sizeof(struct ehca_av), 0,
                                     SLAB_HWCACHE_ALIGN,
                                     NULL, NULL);
        if (!av_cache)
                return -ENOMEM;
        return 0;
}

void ehca_cleanup_av_cache(void)
{
        if (av_cache)
                kmem_cache_destroy(av_cache);
}