nao-ulib.git: kernel/2.6.29.6-aldebaran-rt/drivers/infiniband/core/mad_priv.h
/*
 * Copyright (c) 2004, 2005, Voltaire, Inc. All rights reserved.
 * Copyright (c) 2005 Intel Corporation. All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef __IB_MAD_PRIV_H__
#define __IB_MAD_PRIV_H__

#include <linux/completion.h>
#include <linux/err.h>
#include <linux/workqueue.h>
#include <rdma/ib_mad.h>
#include <rdma/ib_smi.h>
#define PFX "ib_mad: "

#define IB_MAD_QPS_CORE		2 /* Always QP0 and QP1 as a minimum */

/* QP and CQ parameters */
#define IB_MAD_QP_SEND_SIZE	128
#define IB_MAD_QP_RECV_SIZE	512
#define IB_MAD_SEND_REQ_MAX_SG	2
#define IB_MAD_RECV_REQ_MAX_SG	1

#define IB_MAD_SEND_Q_PSN	0

/* Registration table sizes */
#define MAX_MGMT_CLASS		80
#define MAX_MGMT_VERSION	8
#define MAX_MGMT_OUI		8
#define MAX_MGMT_VENDOR_RANGE2	(IB_MGMT_CLASS_VENDOR_RANGE2_END - \
				 IB_MGMT_CLASS_VENDOR_RANGE2_START + 1)
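
/*
 * Worked example (values assumed from the usual <rdma/ib_mad.h> definitions,
 * not restated in this header): IB_MGMT_CLASS_VENDOR_RANGE2_START is 0x30 and
 * IB_MGMT_CLASS_VENDOR_RANGE2_END is 0x4f, so
 * MAX_MGMT_VENDOR_RANGE2 = 0x4f - 0x30 + 1 = 32 vendor class slots.
 */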
struct ib_mad_list_head {
	struct list_head list;
	struct ib_mad_queue *mad_queue;
};

struct ib_mad_private_header {
	struct ib_mad_list_head mad_list;
	struct ib_mad_recv_wc recv_wc;
	struct ib_wc wc;
	u64 mapping;
} __attribute__ ((packed));
struct ib_mad_private {
	struct ib_mad_private_header header;
	struct ib_grh grh;
	union {
		struct ib_mad mad;
		struct ib_rmpp_mad rmpp_mad;
		struct ib_smp smp;
	} mad;
} __attribute__ ((packed));
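
/*
 * Illustrative sketch of how a receive buffer of this type is typically
 * posted by the MAD core (the exact code is an assumption, not quoted from
 * this tree): the scatter entry covers everything after the private header,
 * roughly
 *
 *	sg_list.addr   = mad_priv->header.mapping;
 *	sg_list.length = sizeof(*mad_priv) - sizeof(mad_priv->header);
 *
 * so a completed receive leaves the GRH (when present) in mad_priv->grh and
 * the MAD itself in mad_priv->mad.
 */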
struct ib_rmpp_segment {
	struct list_head list;
	u32 num;
	u8 data[0];
};
struct ib_mad_agent_private {
	struct list_head agent_list;
	struct ib_mad_agent agent;
	struct ib_mad_reg_req *reg_req;
	struct ib_mad_qp_info *qp_info;

	spinlock_t lock;
	struct list_head send_list;
	struct list_head wait_list;
	struct list_head done_list;
	struct delayed_work timed_work;
	unsigned long timeout;
	struct list_head local_list;
	struct work_struct local_work;
	struct list_head rmpp_list;

	atomic_t refcount;
	struct completion comp;
};
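
/*
 * Hedged example of the refcount/completion idiom used with the two members
 * above (a paraphrase of the usual helper in mad.c, assumed rather than
 * copied from this tree):
 *
 *	static inline void deref_mad_agent(struct ib_mad_agent_private *p)
 *	{
 *		if (atomic_dec_and_test(&p->refcount))
 *			complete(&p->comp);
 *	}
 *
 * Unregistration drops its own reference and then waits on
 * wait_for_completion(&p->comp), so the structure is freed only after every
 * outstanding work item and receive has released its reference.
 */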
struct ib_mad_snoop_private {
	struct ib_mad_agent agent;
	struct ib_mad_qp_info *qp_info;
	int snoop_index;
	int mad_snoop_flags;
	atomic_t refcount;
	struct completion comp;
};
/* State tracked for each outstanding MAD send (one per posted send buffer) */
struct ib_mad_send_wr_private {
	struct ib_mad_list_head mad_list;
	struct list_head agent_list;
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_buf send_buf;
	u64 header_mapping;
	u64 payload_mapping;
	struct ib_send_wr send_wr;
	struct ib_sge sg_list[IB_MAD_SEND_REQ_MAX_SG];
	__be64 tid;
	unsigned long timeout;
	int max_retries;
	int retries_left;
	int retry;
	int refcount;
	enum ib_wc_status status;

	/* RMPP control */
	struct list_head rmpp_list;
	struct ib_rmpp_segment *last_ack_seg;
	struct ib_rmpp_segment *cur_seg;
	int last_ack;
	int seg_num;
	int newwin;
	int pad;
};
struct ib_mad_local_private {
	struct list_head completion_list;
	struct ib_mad_private *mad_priv;
	struct ib_mad_agent_private *recv_mad_agent;
	struct ib_mad_send_wr_private *mad_send_wr;
};
struct ib_mad_mgmt_method_table {
	struct ib_mad_agent_private *agent[IB_MGMT_MAX_METHODS];
};

struct ib_mad_mgmt_class_table {
	struct ib_mad_mgmt_method_table *method_table[MAX_MGMT_CLASS];
};

struct ib_mad_mgmt_vendor_class {
	u8 oui[MAX_MGMT_OUI][3];
	struct ib_mad_mgmt_method_table *method_table[MAX_MGMT_OUI];
};

struct ib_mad_mgmt_vendor_class_table {
	struct ib_mad_mgmt_vendor_class *vendor_class[MAX_MGMT_VENDOR_RANGE2];
};

struct ib_mad_mgmt_version_table {
	struct ib_mad_mgmt_class_table *class;
	struct ib_mad_mgmt_vendor_class_table *vendor;
};
struct ib_mad_queue {
	spinlock_t lock;
	struct list_head list;
	int count;
	int max_active;
	struct ib_mad_qp_info *qp_info;
};
struct ib_mad_qp_info {
	struct ib_mad_port_private *port_priv;
	struct ib_qp *qp;
	struct ib_mad_queue send_queue;
	struct ib_mad_queue recv_queue;
	struct list_head overflow_list;
	spinlock_t snoop_lock;
	struct ib_mad_snoop_private **snoop_table;
	int snoop_table_size;
	atomic_t snoop_count;
};
/* Per-port MAD state: protection domain, CQ, MR, registration tables and the special QPs */
struct ib_mad_port_private {
	struct list_head port_list;
	struct ib_device *device;
	int port_num;
	struct ib_cq *cq;
	struct ib_pd *pd;
	struct ib_mr *mr;

	spinlock_t reg_lock;
	struct ib_mad_mgmt_version_table version[MAX_MGMT_VERSION];
	struct list_head agent_list;
	struct workqueue_struct *wq;
	struct work_struct work;
	struct ib_mad_qp_info qp_info[IB_MAD_QPS_CORE];
};
int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr);

struct ib_mad_send_wr_private *
ib_find_send_mad(struct ib_mad_agent_private *mad_agent_priv,
		 struct ib_mad_recv_wc *mad_recv_wc);

void ib_mad_complete_send_wr(struct ib_mad_send_wr_private *mad_send_wr,
			     struct ib_mad_send_wc *mad_send_wc);

void ib_mark_mad_done(struct ib_mad_send_wr_private *mad_send_wr);

void ib_reset_mad_timeout(struct ib_mad_send_wr_private *mad_send_wr,
			  int timeout_ms);
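
/*
 * The prototypes above form the MAD core's internal interface: the functions
 * are defined in mad.c, and exposing them in this private header lets the
 * companion RMPP code (mad_rmpp.c) reuse them to send segments, match
 * responses to outstanding sends, and reset per-send timeouts.
 */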
#endif	/* __IB_MAD_PRIV_H__ */