/* include/rdma/ib_verbs.h */
/*
 * Copyright (c) 2004 Mellanox Technologies Ltd.  All rights reserved.
 * Copyright (c) 2004 Infinicon Corporation.  All rights reserved.
 * Copyright (c) 2004 Intel Corporation.  All rights reserved.
 * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
 * Copyright (c) 2004 Voltaire Corporation.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#if !defined(IB_VERBS_H)
#define IB_VERBS_H

#include <linux/types.h>
#include <linux/device.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/rwsem.h>
#include <linux/scatterlist.h>

#include <asm/atomic.h>
#include <asm/uaccess.h>
union ib_gid {
        u8 raw[16];
        struct {
                __be64 subnet_prefix;
                __be64 interface_id;
        } global;
};
enum rdma_node_type {
        /* IB values map to NodeInfo:NodeType. */
        RDMA_NODE_IB_CA = 1,
        RDMA_NODE_IB_SWITCH,
        RDMA_NODE_IB_ROUTER,
        RDMA_NODE_RNIC
};

enum rdma_transport_type {
        RDMA_TRANSPORT_IB,
        RDMA_TRANSPORT_IWARP
};

enum rdma_transport_type
rdma_node_get_transport(enum rdma_node_type node_type) __attribute_const__;
enum ib_device_cap_flags {
        IB_DEVICE_RESIZE_MAX_WR = 1,
        IB_DEVICE_BAD_PKEY_CNTR = (1<<1),
        IB_DEVICE_BAD_QKEY_CNTR = (1<<2),
        IB_DEVICE_RAW_MULTI = (1<<3),
        IB_DEVICE_AUTO_PATH_MIG = (1<<4),
        IB_DEVICE_CHANGE_PHY_PORT = (1<<5),
        IB_DEVICE_UD_AV_PORT_ENFORCE = (1<<6),
        IB_DEVICE_CURR_QP_STATE_MOD = (1<<7),
        IB_DEVICE_SHUTDOWN_PORT = (1<<8),
        IB_DEVICE_INIT_TYPE = (1<<9),
        IB_DEVICE_PORT_ACTIVE_EVENT = (1<<10),
        IB_DEVICE_SYS_IMAGE_GUID = (1<<11),
        IB_DEVICE_RC_RNR_NAK_GEN = (1<<12),
        IB_DEVICE_SRQ_RESIZE = (1<<13),
        IB_DEVICE_N_NOTIFY_CQ = (1<<14),
        IB_DEVICE_LOCAL_DMA_LKEY = (1<<15),
        IB_DEVICE_RESERVED = (1<<16), /* old SEND_W_INV */
        IB_DEVICE_MEM_WINDOW = (1<<17),
        /*
         * Devices should set IB_DEVICE_UD_IP_CSUM if they support
         * insertion of UDP and TCP checksum on outgoing UD IPoIB
         * messages and can verify the validity of checksum for
         * incoming messages.  Setting this flag implies that the
         * IPoIB driver may set NETIF_F_IP_CSUM for datagram mode.
         */
        IB_DEVICE_UD_IP_CSUM = (1<<18),
        IB_DEVICE_UD_TSO = (1<<19),
        IB_DEVICE_MEM_MGT_EXTENSIONS = (1<<21),
        IB_DEVICE_BLOCK_MULTICAST_LOOPBACK = (1<<22),
};
enum ib_atomic_cap {
        IB_ATOMIC_NONE, /* atomic operations not supported */
        IB_ATOMIC_HCA,  /* atomicity guaranteed within this HCA */
        IB_ATOMIC_GLOB  /* atomicity guaranteed among all entities */
};
struct ib_device_attr {
        u64 fw_ver;
        __be64 sys_image_guid;
        u64 max_mr_size;
        u64 page_size_cap;
        u32 vendor_id;
        u32 vendor_part_id;
        u32 hw_ver;
        int max_qp;
        int max_qp_wr;
        int device_cap_flags;
        int max_sge;
        int max_sge_rd;
        int max_cq;
        int max_cqe;
        int max_mr;
        int max_pd;
        int max_qp_rd_atom;
        int max_ee_rd_atom;
        int max_res_rd_atom;
        int max_qp_init_rd_atom;
        int max_ee_init_rd_atom;
        enum ib_atomic_cap atomic_cap;
        int max_ee;
        int max_rdd;
        int max_mw;
        int max_raw_ipv6_qp;
        int max_raw_ethy_qp;
        int max_mcast_grp;
        int max_mcast_qp_attach;
        int max_total_mcast_qp_attach;
        int max_ah;
        int max_fmr;
        int max_map_per_fmr;
        int max_srq;
        int max_srq_wr;
        int max_srq_sge;
        unsigned int max_fast_reg_page_list_len;
        u16 max_pkeys;
        u8 local_ca_ack_delay;
};
enum ib_mtu {
        IB_MTU_256  = 1,
        IB_MTU_512  = 2,
        IB_MTU_1024 = 3,
        IB_MTU_2048 = 4,
        IB_MTU_4096 = 5
};

static inline int ib_mtu_enum_to_int(enum ib_mtu mtu)
{
        switch (mtu) {
        case IB_MTU_256:  return 256;
        case IB_MTU_512:  return 512;
        case IB_MTU_1024: return 1024;
        case IB_MTU_2048: return 2048;
        case IB_MTU_4096: return 4096;
        default:          return -1;
        }
}
enum ib_port_state {
        IB_PORT_NOP          = 0,
        IB_PORT_DOWN         = 1,
        IB_PORT_INIT         = 2,
        IB_PORT_ARMED        = 3,
        IB_PORT_ACTIVE       = 4,
        IB_PORT_ACTIVE_DEFER = 5
};

enum ib_port_cap_flags {
        IB_PORT_SM                        = 1 << 1,
        IB_PORT_NOTICE_SUP                = 1 << 2,
        IB_PORT_TRAP_SUP                  = 1 << 3,
        IB_PORT_OPT_IPD_SUP               = 1 << 4,
        IB_PORT_AUTO_MIGR_SUP             = 1 << 5,
        IB_PORT_SL_MAP_SUP                = 1 << 6,
        IB_PORT_MKEY_NVRAM                = 1 << 7,
        IB_PORT_PKEY_NVRAM                = 1 << 8,
        IB_PORT_LED_INFO_SUP              = 1 << 9,
        IB_PORT_SM_DISABLED               = 1 << 10,
        IB_PORT_SYS_IMAGE_GUID_SUP        = 1 << 11,
        IB_PORT_PKEY_SW_EXT_PORT_TRAP_SUP = 1 << 12,
        IB_PORT_CM_SUP                    = 1 << 16,
        IB_PORT_SNMP_TUNNEL_SUP           = 1 << 17,
        IB_PORT_REINIT_SUP                = 1 << 18,
        IB_PORT_DEVICE_MGMT_SUP           = 1 << 19,
        IB_PORT_VENDOR_CLASS_SUP          = 1 << 20,
        IB_PORT_DR_NOTICE_SUP             = 1 << 21,
        IB_PORT_CAP_MASK_NOTICE_SUP       = 1 << 22,
        IB_PORT_BOOT_MGMT_SUP             = 1 << 23,
        IB_PORT_LINK_LATENCY_SUP          = 1 << 24,
        IB_PORT_CLIENT_REG_SUP            = 1 << 25
};
enum ib_port_width {
        IB_WIDTH_1X  = 1,
        IB_WIDTH_4X  = 2,
        IB_WIDTH_8X  = 4,
        IB_WIDTH_12X = 8
};

static inline int ib_width_enum_to_int(enum ib_port_width width)
{
        switch (width) {
        case IB_WIDTH_1X:  return 1;
        case IB_WIDTH_4X:  return 4;
        case IB_WIDTH_8X:  return 8;
        case IB_WIDTH_12X: return 12;
        default:           return -1;
        }
}
struct ib_protocol_stats {
        /* TBD... */
};

struct iw_protocol_stats {
        u64 ipInReceives;
        u64 ipInHdrErrors;
        u64 ipInTooBigErrors;
        u64 ipInNoRoutes;
        u64 ipInAddrErrors;
        u64 ipInUnknownProtos;
        u64 ipInTruncatedPkts;
        u64 ipInDiscards;
        u64 ipInDelivers;
        u64 ipOutForwDatagrams;
        u64 ipOutRequests;
        u64 ipOutDiscards;
        u64 ipOutNoRoutes;
        u64 ipReasmTimeout;
        u64 ipReasmReqds;
        u64 ipReasmOKs;
        u64 ipReasmFails;
        u64 ipFragOKs;
        u64 ipFragFails;
        u64 ipFragCreates;
        u64 ipInMcastPkts;
        u64 ipOutMcastPkts;
        u64 ipInBcastPkts;
        u64 ipOutBcastPkts;

        u64 tcpRtoAlgorithm;
        u64 tcpRtoMin;
        u64 tcpRtoMax;
        u64 tcpMaxConn;
        u64 tcpActiveOpens;
        u64 tcpPassiveOpens;
        u64 tcpAttemptFails;
        u64 tcpEstabResets;
        u64 tcpCurrEstab;
        u64 tcpInSegs;
        u64 tcpOutSegs;
        u64 tcpRetransSegs;
        u64 tcpInErrs;
        u64 tcpOutRsts;
};

union rdma_protocol_stats {
        struct ib_protocol_stats ib;
        struct iw_protocol_stats iw;
};
struct ib_port_attr {
        enum ib_port_state state;
        enum ib_mtu max_mtu;
        enum ib_mtu active_mtu;
        int gid_tbl_len;
        u32 port_cap_flags;
        u32 max_msg_sz;
        u32 bad_pkey_cntr;
        u32 qkey_viol_cntr;
        u16 pkey_tbl_len;
        u16 lid;
        u16 sm_lid;
        u8 lmc;
        u8 max_vl_num;
        u8 sm_sl;
        u8 subnet_timeout;
        u8 init_type_reply;
        u8 active_width;
        u8 active_speed;
        u8 phys_state;
};

enum ib_device_modify_flags {
        IB_DEVICE_MODIFY_SYS_IMAGE_GUID = 1 << 0,
        IB_DEVICE_MODIFY_NODE_DESC      = 1 << 1
};

struct ib_device_modify {
        u64 sys_image_guid;
        char node_desc[64];
};

enum ib_port_modify_flags {
        IB_PORT_SHUTDOWN        = 1,
        IB_PORT_INIT_TYPE       = (1<<2),
        IB_PORT_RESET_QKEY_CNTR = (1<<3)
};

struct ib_port_modify {
        u32 set_port_cap_mask;
        u32 clr_port_cap_mask;
        u8 init_type;
};
enum ib_event_type {
        IB_EVENT_CQ_ERR,
        IB_EVENT_QP_FATAL,
        IB_EVENT_QP_REQ_ERR,
        IB_EVENT_QP_ACCESS_ERR,
        IB_EVENT_COMM_EST,
        IB_EVENT_SQ_DRAINED,
        IB_EVENT_PATH_MIG,
        IB_EVENT_PATH_MIG_ERR,
        IB_EVENT_DEVICE_FATAL,
        IB_EVENT_PORT_ACTIVE,
        IB_EVENT_PORT_ERR,
        IB_EVENT_LID_CHANGE,
        IB_EVENT_PKEY_CHANGE,
        IB_EVENT_SM_CHANGE,
        IB_EVENT_SRQ_ERR,
        IB_EVENT_SRQ_LIMIT_REACHED,
        IB_EVENT_QP_LAST_WQE_REACHED,
        IB_EVENT_CLIENT_REREGISTER
};

struct ib_event {
        struct ib_device *device;
        union {
                struct ib_cq *cq;
                struct ib_qp *qp;
                struct ib_srq *srq;
                u8 port_num;
        } element;
        enum ib_event_type event;
};

struct ib_event_handler {
        struct ib_device *device;
        void (*handler)(struct ib_event_handler *, struct ib_event *);
        struct list_head list;
};
#define INIT_IB_EVENT_HANDLER(_ptr, _device, _handler)          \
        do {                                                    \
                (_ptr)->device  = _device;                      \
                (_ptr)->handler = _handler;                     \
                INIT_LIST_HEAD(&(_ptr)->list);                  \
        } while (0)
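
/*
 * Example (editorial addition, not part of the upstream header): a minimal
 * sketch of how a consumer might hook asynchronous events with
 * INIT_IB_EVENT_HANDLER() and ib_register_event_handler().  The function
 * my_async_event_handler() and the variable my_event_handler are
 * hypothetical names.
 */
#if 0
static void my_async_event_handler(struct ib_event_handler *handler,
                                   struct ib_event *event)
{
        /* Runs in the dispatching context; keep the work short. */
        if (event->event == IB_EVENT_PORT_ACTIVE)
                printk(KERN_INFO "%s: port %d became active\n",
                       event->device->name, event->element.port_num);
}

static struct ib_event_handler my_event_handler;

static void my_setup(struct ib_device *device)
{
        INIT_IB_EVENT_HANDLER(&my_event_handler, device,
                              my_async_event_handler);
        ib_register_event_handler(&my_event_handler);
}
#endif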
struct ib_global_route {
        union ib_gid dgid;
        u32 flow_label;
        u8 sgid_index;
        u8 hop_limit;
        u8 traffic_class;
};

struct ib_grh {
        __be32 version_tclass_flow;
        __be16 paylen;
        u8 next_hdr;
        u8 hop_limit;
        union ib_gid sgid;
        union ib_gid dgid;
};

enum {
        IB_MULTICAST_QPN = 0xffffff
};

#define IB_LID_PERMISSIVE cpu_to_be16(0xFFFF)

enum ib_ah_flags {
        IB_AH_GRH = 1
};

enum ib_rate {
        IB_RATE_PORT_CURRENT = 0,
        IB_RATE_2_5_GBPS = 2,
        IB_RATE_5_GBPS   = 5,
        IB_RATE_10_GBPS  = 3,
        IB_RATE_20_GBPS  = 6,
        IB_RATE_30_GBPS  = 4,
        IB_RATE_40_GBPS  = 7,
        IB_RATE_60_GBPS  = 8,
        IB_RATE_80_GBPS  = 9,
        IB_RATE_120_GBPS = 10
};

/**
 * ib_rate_to_mult - Convert the IB rate enum to a multiple of the
 * base rate of 2.5 Gbit/sec.  For example, IB_RATE_5_GBPS will be
 * converted to 2, since 5 Gbit/sec is 2 * 2.5 Gbit/sec.
 * @rate: rate to convert.
 */
int ib_rate_to_mult(enum ib_rate rate) __attribute_const__;

/**
 * mult_to_ib_rate - Convert a multiple of 2.5 Gbit/sec to an IB rate
 * enum.
 * @mult: multiple to convert.
 */
enum ib_rate mult_to_ib_rate(int mult) __attribute_const__;
struct ib_ah_attr {
        struct ib_global_route grh;
        u16 dlid;
        u8 sl;
        u8 src_path_bits;
        u8 static_rate;
        u8 ah_flags;
        u8 port_num;
};

enum ib_wc_status {
        IB_WC_SUCCESS,
        IB_WC_LOC_LEN_ERR,
        IB_WC_LOC_QP_OP_ERR,
        IB_WC_LOC_EEC_OP_ERR,
        IB_WC_LOC_PROT_ERR,
        IB_WC_WR_FLUSH_ERR,
        IB_WC_MW_BIND_ERR,
        IB_WC_BAD_RESP_ERR,
        IB_WC_LOC_ACCESS_ERR,
        IB_WC_REM_INV_REQ_ERR,
        IB_WC_REM_ACCESS_ERR,
        IB_WC_REM_OP_ERR,
        IB_WC_RETRY_EXC_ERR,
        IB_WC_RNR_RETRY_EXC_ERR,
        IB_WC_LOC_RDD_VIOL_ERR,
        IB_WC_REM_INV_RD_REQ_ERR,
        IB_WC_REM_ABORT_ERR,
        IB_WC_INV_EECN_ERR,
        IB_WC_INV_EEC_STATE_ERR,
        IB_WC_FATAL_ERR,
        IB_WC_RESP_TIMEOUT_ERR,
        IB_WC_GENERAL_ERR
};
enum ib_wc_opcode {
        IB_WC_SEND,
        IB_WC_RDMA_WRITE,
        IB_WC_RDMA_READ,
        IB_WC_COMP_SWAP,
        IB_WC_FETCH_ADD,
        IB_WC_BIND_MW,
        IB_WC_LSO,
        IB_WC_LOCAL_INV,
        IB_WC_FAST_REG_MR,
        /*
         * Set value of IB_WC_RECV so consumers can test if a completion is a
         * receive by testing (opcode & IB_WC_RECV).
         */
        IB_WC_RECV = 1 << 7,
        IB_WC_RECV_RDMA_WITH_IMM
};

enum ib_wc_flags {
        IB_WC_GRH             = 1,
        IB_WC_WITH_IMM        = (1<<1),
        IB_WC_WITH_INVALIDATE = (1<<2),
};
struct ib_wc {
        u64 wr_id;
        enum ib_wc_status status;
        enum ib_wc_opcode opcode;
        u32 vendor_err;
        u32 byte_len;
        struct ib_qp *qp;
        union {
                __be32 imm_data;
                u32 invalidate_rkey;
        } ex;
        u32 src_qp;
        int wc_flags;
        u16 pkey_index;
        u16 slid;
        u8 sl;
        u8 dlid_path_bits;
        u8 port_num; /* valid only for DR SMPs on switches */
        int csum_ok;
};

enum ib_cq_notify_flags {
        IB_CQ_SOLICITED            = 1 << 0,
        IB_CQ_NEXT_COMP            = 1 << 1,
        IB_CQ_SOLICITED_MASK       = IB_CQ_SOLICITED | IB_CQ_NEXT_COMP,
        IB_CQ_REPORT_MISSED_EVENTS = 1 << 2,
};
enum ib_srq_attr_mask {
        IB_SRQ_MAX_WR = 1 << 0,
        IB_SRQ_LIMIT  = 1 << 1,
};

struct ib_srq_attr {
        u32 max_wr;
        u32 max_sge;
        u32 srq_limit;
};

struct ib_srq_init_attr {
        void (*event_handler)(struct ib_event *, void *);
        void *srq_context;
        struct ib_srq_attr attr;
};

struct ib_qp_cap {
        u32 max_send_wr;
        u32 max_recv_wr;
        u32 max_send_sge;
        u32 max_recv_sge;
        u32 max_inline_data;
};
enum ib_sig_type {
        IB_SIGNAL_ALL_WR,
        IB_SIGNAL_REQ_WR
};

enum ib_qp_type {
        /*
         * IB_QPT_SMI and IB_QPT_GSI have to be the first two entries
         * here (and in that order) since the MAD layer uses them as
         * indices into a 2-entry table.
         */
        IB_QPT_SMI,
        IB_QPT_GSI,

        IB_QPT_RC,
        IB_QPT_UC,
        IB_QPT_UD,
        IB_QPT_RAW_IPV6,
        IB_QPT_RAW_ETY
};

enum ib_qp_create_flags {
        IB_QP_CREATE_IPOIB_UD_LSO             = 1 << 0,
        IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK = 1 << 1,
};

struct ib_qp_init_attr {
        void (*event_handler)(struct ib_event *, void *);
        void *qp_context;
        struct ib_cq *send_cq;
        struct ib_cq *recv_cq;
        struct ib_srq *srq;
        struct ib_qp_cap cap;
        enum ib_sig_type sq_sig_type;
        enum ib_qp_type qp_type;
        enum ib_qp_create_flags create_flags;
        u8 port_num; /* special QP types only */
};
/*
 * RNR NAK timer field values: the name encodes the delay in milliseconds,
 * e.g. IB_RNR_TIMER_000_01 is 0.01 ms and IB_RNR_TIMER_655_36 is 655.36 ms.
 */
enum ib_rnr_timeout {
        IB_RNR_TIMER_655_36 = 0,
        IB_RNR_TIMER_000_01 = 1,
        IB_RNR_TIMER_000_02 = 2,
        IB_RNR_TIMER_000_03 = 3,
        IB_RNR_TIMER_000_04 = 4,
        IB_RNR_TIMER_000_06 = 5,
        IB_RNR_TIMER_000_08 = 6,
        IB_RNR_TIMER_000_12 = 7,
        IB_RNR_TIMER_000_16 = 8,
        IB_RNR_TIMER_000_24 = 9,
        IB_RNR_TIMER_000_32 = 10,
        IB_RNR_TIMER_000_48 = 11,
        IB_RNR_TIMER_000_64 = 12,
        IB_RNR_TIMER_000_96 = 13,
        IB_RNR_TIMER_001_28 = 14,
        IB_RNR_TIMER_001_92 = 15,
        IB_RNR_TIMER_002_56 = 16,
        IB_RNR_TIMER_003_84 = 17,
        IB_RNR_TIMER_005_12 = 18,
        IB_RNR_TIMER_007_68 = 19,
        IB_RNR_TIMER_010_24 = 20,
        IB_RNR_TIMER_015_36 = 21,
        IB_RNR_TIMER_020_48 = 22,
        IB_RNR_TIMER_030_72 = 23,
        IB_RNR_TIMER_040_96 = 24,
        IB_RNR_TIMER_061_44 = 25,
        IB_RNR_TIMER_081_92 = 26,
        IB_RNR_TIMER_122_88 = 27,
        IB_RNR_TIMER_163_84 = 28,
        IB_RNR_TIMER_245_76 = 29,
        IB_RNR_TIMER_327_68 = 30,
        IB_RNR_TIMER_491_52 = 31
};
enum ib_qp_attr_mask {
        IB_QP_STATE               = 1,
        IB_QP_CUR_STATE           = (1<<1),
        IB_QP_EN_SQD_ASYNC_NOTIFY = (1<<2),
        IB_QP_ACCESS_FLAGS        = (1<<3),
        IB_QP_PKEY_INDEX          = (1<<4),
        IB_QP_PORT                = (1<<5),
        IB_QP_QKEY                = (1<<6),
        IB_QP_AV                  = (1<<7),
        IB_QP_PATH_MTU            = (1<<8),
        IB_QP_TIMEOUT             = (1<<9),
        IB_QP_RETRY_CNT           = (1<<10),
        IB_QP_RNR_RETRY           = (1<<11),
        IB_QP_RQ_PSN              = (1<<12),
        IB_QP_MAX_QP_RD_ATOMIC    = (1<<13),
        IB_QP_ALT_PATH            = (1<<14),
        IB_QP_MIN_RNR_TIMER       = (1<<15),
        IB_QP_SQ_PSN              = (1<<16),
        IB_QP_MAX_DEST_RD_ATOMIC  = (1<<17),
        IB_QP_PATH_MIG_STATE      = (1<<18),
        IB_QP_CAP                 = (1<<19),
        IB_QP_DEST_QPN            = (1<<20)
};

enum ib_qp_state {
        IB_QPS_RESET,
        IB_QPS_INIT,
        IB_QPS_RTR,
        IB_QPS_RTS,
        IB_QPS_SQD,
        IB_QPS_SQE,
        IB_QPS_ERR
};

enum ib_mig_state {
        IB_MIG_MIGRATED,
        IB_MIG_REARM,
        IB_MIG_ARMED
};

struct ib_qp_attr {
        enum ib_qp_state qp_state;
        enum ib_qp_state cur_qp_state;
        enum ib_mtu path_mtu;
        enum ib_mig_state path_mig_state;
        u32 qkey;
        u32 rq_psn;
        u32 sq_psn;
        u32 dest_qp_num;
        int qp_access_flags;
        struct ib_qp_cap cap;
        struct ib_ah_attr ah_attr;
        struct ib_ah_attr alt_ah_attr;
        u16 pkey_index;
        u16 alt_pkey_index;
        u8 en_sqd_async_notify;
        u8 sq_draining;
        u8 max_rd_atomic;
        u8 max_dest_rd_atomic;
        u8 min_rnr_timer;
        u8 port_num;
        u8 timeout;
        u8 retry_cnt;
        u8 rnr_retry;
        u8 alt_port_num;
        u8 alt_timeout;
};
enum ib_wr_opcode {
        IB_WR_RDMA_WRITE,
        IB_WR_RDMA_WRITE_WITH_IMM,
        IB_WR_SEND,
        IB_WR_SEND_WITH_IMM,
        IB_WR_RDMA_READ,
        IB_WR_ATOMIC_CMP_AND_SWP,
        IB_WR_ATOMIC_FETCH_AND_ADD,
        IB_WR_LSO,
        IB_WR_SEND_WITH_INV,
        IB_WR_RDMA_READ_WITH_INV,
        IB_WR_LOCAL_INV,
        IB_WR_FAST_REG_MR,
};

enum ib_send_flags {
        IB_SEND_FENCE     = 1,
        IB_SEND_SIGNALED  = (1<<1),
        IB_SEND_SOLICITED = (1<<2),
        IB_SEND_INLINE    = (1<<3),
        IB_SEND_IP_CSUM   = (1<<4)
};

struct ib_sge {
        u64 addr;
        u32 length;
        u32 lkey;
};

struct ib_fast_reg_page_list {
        struct ib_device *device;
        u64 *page_list;
        unsigned int max_page_list_len;
};

struct ib_send_wr {
        struct ib_send_wr *next;
        u64 wr_id;
        struct ib_sge *sg_list;
        int num_sge;
        enum ib_wr_opcode opcode;
        int send_flags;
        union {
                __be32 imm_data;
                u32 invalidate_rkey;
        } ex;
        union {
                struct {
                        u64 remote_addr;
                        u32 rkey;
                } rdma;
                struct {
                        u64 remote_addr;
                        u64 compare_add;
                        u64 swap;
                        u32 rkey;
                } atomic;
                struct {
                        struct ib_ah *ah;
                        void *header;
                        int hlen;
                        int mss;
                        u32 remote_qpn;
                        u32 remote_qkey;
                        u16 pkey_index; /* valid for GSI only */
                        u8 port_num;    /* valid for DR SMPs on switch only */
                } ud;
                struct {
                        u64 iova_start;
                        struct ib_fast_reg_page_list *page_list;
                        unsigned int page_shift;
                        unsigned int page_list_len;
                        u32 length;
                        int access_flags;
                        u32 rkey;
                } fast_reg;
        } wr;
};
struct ib_recv_wr {
        struct ib_recv_wr *next;
        u64 wr_id;
        struct ib_sge *sg_list;
        int num_sge;
};

enum ib_access_flags {
        IB_ACCESS_LOCAL_WRITE   = 1,
        IB_ACCESS_REMOTE_WRITE  = (1<<1),
        IB_ACCESS_REMOTE_READ   = (1<<2),
        IB_ACCESS_REMOTE_ATOMIC = (1<<3),
        IB_ACCESS_MW_BIND       = (1<<4)
};

struct ib_phys_buf {
        u64 addr;
        u64 size;
};

struct ib_mr_attr {
        struct ib_pd *pd;
        u64 device_virt_addr;
        u64 size;
        int mr_access_flags;
        u32 lkey;
        u32 rkey;
};

enum ib_mr_rereg_flags {
        IB_MR_REREG_TRANS  = 1,
        IB_MR_REREG_PD     = (1<<1),
        IB_MR_REREG_ACCESS = (1<<2)
};

struct ib_mw_bind {
        struct ib_mr *mr;
        u64 wr_id;
        u64 addr;
        u32 length;
        int send_flags;
        int mw_access_flags;
};

struct ib_fmr_attr {
        int max_pages;
        int max_maps;
        u8 page_shift;
};
struct ib_ucontext {
        struct ib_device *device;
        struct list_head pd_list;
        struct list_head mr_list;
        struct list_head mw_list;
        struct list_head cq_list;
        struct list_head qp_list;
        struct list_head srq_list;
        struct list_head ah_list;
        int closing;
};

struct ib_uobject {
        u64 user_handle;            /* handle given to us by userspace */
        struct ib_ucontext *context; /* associated user context */
        void *object;               /* containing object */
        struct list_head list;      /* link to context's list */
        int id;                     /* index into kernel idr */
        struct kref ref;
        struct rw_semaphore mutex;  /* protects .live */
        int live;
};

struct ib_udata {
        void __user *inbuf;
        void __user *outbuf;
        size_t inlen;
        size_t outlen;
};
struct ib_pd {
        struct ib_device *device;
        struct ib_uobject *uobject;
        atomic_t usecnt; /* count all resources */
};

struct ib_ah {
        struct ib_device *device;
        struct ib_pd *pd;
        struct ib_uobject *uobject;
};

typedef void (*ib_comp_handler)(struct ib_cq *cq, void *cq_context);

struct ib_cq {
        struct ib_device *device;
        struct ib_uobject *uobject;
        ib_comp_handler comp_handler;
        void (*event_handler)(struct ib_event *, void *);
        void *cq_context;
        int cqe;
        atomic_t usecnt; /* count number of work queues */
};

struct ib_srq {
        struct ib_device *device;
        struct ib_pd *pd;
        struct ib_uobject *uobject;
        void (*event_handler)(struct ib_event *, void *);
        void *srq_context;
        atomic_t usecnt;
};

struct ib_qp {
        struct ib_device *device;
        struct ib_pd *pd;
        struct ib_cq *send_cq;
        struct ib_cq *recv_cq;
        struct ib_srq *srq;
        struct ib_uobject *uobject;
        void (*event_handler)(struct ib_event *, void *);
        void *qp_context;
        u32 qp_num;
        enum ib_qp_type qp_type;
};

struct ib_mr {
        struct ib_device *device;
        struct ib_pd *pd;
        struct ib_uobject *uobject;
        u32 lkey;
        u32 rkey;
        atomic_t usecnt; /* count number of MWs */
};

struct ib_mw {
        struct ib_device *device;
        struct ib_pd *pd;
        struct ib_uobject *uobject;
        u32 rkey;
};

struct ib_fmr {
        struct ib_device *device;
        struct ib_pd *pd;
        struct list_head list;
        u32 lkey;
        u32 rkey;
};
struct ib_mad;
struct ib_grh;

enum ib_process_mad_flags {
        IB_MAD_IGNORE_MKEY = 1,
        IB_MAD_IGNORE_BKEY = 2,
        IB_MAD_IGNORE_ALL  = IB_MAD_IGNORE_MKEY | IB_MAD_IGNORE_BKEY
};

enum ib_mad_result {
        IB_MAD_RESULT_FAILURE  = 0,      /* (!SUCCESS is the important flag) */
        IB_MAD_RESULT_SUCCESS  = 1 << 0, /* MAD was successfully processed */
        IB_MAD_RESULT_REPLY    = 1 << 1, /* Reply packet needs to be sent */
        IB_MAD_RESULT_CONSUMED = 1 << 2  /* Packet consumed: stop processing */
};

#define IB_DEVICE_NAME_MAX 64

struct ib_cache {
        rwlock_t lock;
        struct ib_event_handler event_handler;
        struct ib_pkey_cache **pkey_cache;
        struct ib_gid_cache **gid_cache;
        u8 *lmc_cache;
};
struct ib_dma_mapping_ops {
        int (*mapping_error)(struct ib_device *dev,
                             u64 dma_addr);
        u64 (*map_single)(struct ib_device *dev,
                          void *ptr, size_t size,
                          enum dma_data_direction direction);
        void (*unmap_single)(struct ib_device *dev,
                             u64 addr, size_t size,
                             enum dma_data_direction direction);
        u64 (*map_page)(struct ib_device *dev,
                        struct page *page, unsigned long offset,
                        size_t size,
                        enum dma_data_direction direction);
        void (*unmap_page)(struct ib_device *dev,
                           u64 addr, size_t size,
                           enum dma_data_direction direction);
        int (*map_sg)(struct ib_device *dev,
                      struct scatterlist *sg, int nents,
                      enum dma_data_direction direction);
        void (*unmap_sg)(struct ib_device *dev,
                         struct scatterlist *sg, int nents,
                         enum dma_data_direction direction);
        u64 (*dma_address)(struct ib_device *dev,
                           struct scatterlist *sg);
        unsigned int (*dma_len)(struct ib_device *dev,
                                struct scatterlist *sg);
        void (*sync_single_for_cpu)(struct ib_device *dev,
                                    u64 dma_handle,
                                    size_t size,
                                    enum dma_data_direction dir);
        void (*sync_single_for_device)(struct ib_device *dev,
                                       u64 dma_handle,
                                       size_t size,
                                       enum dma_data_direction dir);
        void *(*alloc_coherent)(struct ib_device *dev,
                                size_t size,
                                u64 *dma_handle,
                                gfp_t flag);
        void (*free_coherent)(struct ib_device *dev,
                              size_t size, void *cpu_addr,
                              u64 dma_handle);
};
struct iw_cm_verbs;

struct ib_device {
        struct device *dma_device;

        char name[IB_DEVICE_NAME_MAX];

        struct list_head event_handler_list;
        spinlock_t event_handler_lock;

        struct list_head core_list;
        struct list_head client_data_list;
        spinlock_t client_data_lock;

        struct ib_cache cache;
        int *pkey_tbl_len;
        int *gid_tbl_len;

        int num_comp_vectors;

        struct iw_cm_verbs *iwcm;

        int (*get_protocol_stats)(struct ib_device *device,
                                  union rdma_protocol_stats *stats);
        int (*query_device)(struct ib_device *device,
                            struct ib_device_attr *device_attr);
        int (*query_port)(struct ib_device *device,
                          u8 port_num,
                          struct ib_port_attr *port_attr);
        int (*query_gid)(struct ib_device *device,
                         u8 port_num, int index,
                         union ib_gid *gid);
        int (*query_pkey)(struct ib_device *device,
                          u8 port_num, u16 index, u16 *pkey);
        int (*modify_device)(struct ib_device *device,
                             int device_modify_mask,
                             struct ib_device_modify *device_modify);
        int (*modify_port)(struct ib_device *device,
                           u8 port_num, int port_modify_mask,
                           struct ib_port_modify *port_modify);
        struct ib_ucontext *(*alloc_ucontext)(struct ib_device *device,
                                              struct ib_udata *udata);
        int (*dealloc_ucontext)(struct ib_ucontext *context);
        int (*mmap)(struct ib_ucontext *context,
                    struct vm_area_struct *vma);
        struct ib_pd *(*alloc_pd)(struct ib_device *device,
                                  struct ib_ucontext *context,
                                  struct ib_udata *udata);
        int (*dealloc_pd)(struct ib_pd *pd);
        struct ib_ah *(*create_ah)(struct ib_pd *pd,
                                   struct ib_ah_attr *ah_attr);
        int (*modify_ah)(struct ib_ah *ah,
                         struct ib_ah_attr *ah_attr);
        int (*query_ah)(struct ib_ah *ah,
                        struct ib_ah_attr *ah_attr);
        int (*destroy_ah)(struct ib_ah *ah);
        struct ib_srq *(*create_srq)(struct ib_pd *pd,
                                     struct ib_srq_init_attr *srq_init_attr,
                                     struct ib_udata *udata);
        int (*modify_srq)(struct ib_srq *srq,
                          struct ib_srq_attr *srq_attr,
                          enum ib_srq_attr_mask srq_attr_mask,
                          struct ib_udata *udata);
        int (*query_srq)(struct ib_srq *srq,
                         struct ib_srq_attr *srq_attr);
        int (*destroy_srq)(struct ib_srq *srq);
        int (*post_srq_recv)(struct ib_srq *srq,
                             struct ib_recv_wr *recv_wr,
                             struct ib_recv_wr **bad_recv_wr);
        struct ib_qp *(*create_qp)(struct ib_pd *pd,
                                   struct ib_qp_init_attr *qp_init_attr,
                                   struct ib_udata *udata);
        int (*modify_qp)(struct ib_qp *qp,
                         struct ib_qp_attr *qp_attr,
                         int qp_attr_mask,
                         struct ib_udata *udata);
        int (*query_qp)(struct ib_qp *qp,
                        struct ib_qp_attr *qp_attr,
                        int qp_attr_mask,
                        struct ib_qp_init_attr *qp_init_attr);
        int (*destroy_qp)(struct ib_qp *qp);
        int (*post_send)(struct ib_qp *qp,
                         struct ib_send_wr *send_wr,
                         struct ib_send_wr **bad_send_wr);
        int (*post_recv)(struct ib_qp *qp,
                         struct ib_recv_wr *recv_wr,
                         struct ib_recv_wr **bad_recv_wr);
        struct ib_cq *(*create_cq)(struct ib_device *device, int cqe,
                                   int comp_vector,
                                   struct ib_ucontext *context,
                                   struct ib_udata *udata);
        int (*modify_cq)(struct ib_cq *cq, u16 cq_count,
                         u16 cq_period);
        int (*destroy_cq)(struct ib_cq *cq);
        int (*resize_cq)(struct ib_cq *cq, int cqe,
                         struct ib_udata *udata);
        int (*poll_cq)(struct ib_cq *cq, int num_entries,
                       struct ib_wc *wc);
        int (*peek_cq)(struct ib_cq *cq, int wc_cnt);
        int (*req_notify_cq)(struct ib_cq *cq,
                             enum ib_cq_notify_flags flags);
        int (*req_ncomp_notif)(struct ib_cq *cq,
                               int wc_cnt);
        struct ib_mr *(*get_dma_mr)(struct ib_pd *pd,
                                    int mr_access_flags);
        struct ib_mr *(*reg_phys_mr)(struct ib_pd *pd,
                                     struct ib_phys_buf *phys_buf_array,
                                     int num_phys_buf,
                                     int mr_access_flags,
                                     u64 *iova_start);
        struct ib_mr *(*reg_user_mr)(struct ib_pd *pd,
                                     u64 start, u64 length,
                                     u64 virt_addr,
                                     int mr_access_flags,
                                     struct ib_udata *udata);
        int (*query_mr)(struct ib_mr *mr,
                        struct ib_mr_attr *mr_attr);
        int (*dereg_mr)(struct ib_mr *mr);
        struct ib_mr *(*alloc_fast_reg_mr)(struct ib_pd *pd,
                                           int max_page_list_len);
        struct ib_fast_reg_page_list *(*alloc_fast_reg_page_list)(struct ib_device *device,
                                                                  int page_list_len);
        void (*free_fast_reg_page_list)(struct ib_fast_reg_page_list *page_list);
        int (*rereg_phys_mr)(struct ib_mr *mr,
                             int mr_rereg_mask,
                             struct ib_pd *pd,
                             struct ib_phys_buf *phys_buf_array,
                             int num_phys_buf,
                             int mr_access_flags,
                             u64 *iova_start);
        struct ib_mw *(*alloc_mw)(struct ib_pd *pd);
        int (*bind_mw)(struct ib_qp *qp,
                       struct ib_mw *mw,
                       struct ib_mw_bind *mw_bind);
        int (*dealloc_mw)(struct ib_mw *mw);
        struct ib_fmr *(*alloc_fmr)(struct ib_pd *pd,
                                    int mr_access_flags,
                                    struct ib_fmr_attr *fmr_attr);
        int (*map_phys_fmr)(struct ib_fmr *fmr,
                            u64 *page_list, int list_len,
                            u64 iova);
        int (*unmap_fmr)(struct list_head *fmr_list);
        int (*dealloc_fmr)(struct ib_fmr *fmr);
        int (*attach_mcast)(struct ib_qp *qp,
                            union ib_gid *gid,
                            u16 lid);
        int (*detach_mcast)(struct ib_qp *qp,
                            union ib_gid *gid,
                            u16 lid);
        int (*process_mad)(struct ib_device *device,
                           int process_mad_flags,
                           u8 port_num,
                           struct ib_wc *in_wc,
                           struct ib_grh *in_grh,
                           struct ib_mad *in_mad,
                           struct ib_mad *out_mad);

        struct ib_dma_mapping_ops *dma_ops;

        struct module *owner;
        struct device dev;
        struct kobject *ports_parent;
        struct list_head port_list;

        enum {
                IB_DEV_UNINITIALIZED,
                IB_DEV_REGISTERED,
                IB_DEV_UNREGISTERED
        } reg_state;

        u64 uverbs_cmd_mask;
        int uverbs_abi_ver;

        char node_desc[64];
        __be64 node_guid;
        u32 local_dma_lkey;
        u8 node_type;
        u8 phys_port_cnt;
};
struct ib_client {
        char *name;
        void (*add)   (struct ib_device *);
        void (*remove)(struct ib_device *);

        struct list_head list;
};

struct ib_device *ib_alloc_device(size_t size);
void ib_dealloc_device(struct ib_device *device);

int ib_register_device   (struct ib_device *device);
void ib_unregister_device(struct ib_device *device);

int ib_register_client   (struct ib_client *client);
void ib_unregister_client(struct ib_client *client);

void *ib_get_client_data(struct ib_device *device, struct ib_client *client);
void  ib_set_client_data(struct ib_device *device, struct ib_client *client,
                         void *data);
static inline int ib_copy_from_udata(void *dest, struct ib_udata *udata, size_t len)
{
        return copy_from_user(dest, udata->inbuf, len) ? -EFAULT : 0;
}

static inline int ib_copy_to_udata(struct ib_udata *udata, void *src, size_t len)
{
        return copy_to_user(udata->outbuf, src, len) ? -EFAULT : 0;
}
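
/*
 * Example (editorial addition): a simplified sketch of how a low-level
 * driver's alloc_ucontext method might use ib_copy_to_udata() to return an
 * ABI response to userspace.  struct my_alloc_ucontext_resp and
 * my_alloc_ucontext() are hypothetical; a real driver embeds the
 * ib_ucontext in its own context structure.
 */
#if 0
struct my_alloc_ucontext_resp {
        __u32 qp_tab_size;
        __u32 reserved;
};

static struct ib_ucontext *my_alloc_ucontext(struct ib_device *ibdev,
                                             struct ib_udata *udata)
{
        struct my_alloc_ucontext_resp resp;
        struct ib_ucontext *context;

        context = kzalloc(sizeof *context, GFP_KERNEL);
        if (!context)
                return ERR_PTR(-ENOMEM);

        resp.qp_tab_size = 256; /* device-specific value */
        resp.reserved    = 0;

        if (ib_copy_to_udata(udata, &resp, sizeof resp)) {
                kfree(context);
                return ERR_PTR(-EFAULT);
        }
        return context;
}
#endif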
/**
 * ib_modify_qp_is_ok - Check that the supplied attribute mask
 * contains all required attributes and no attributes not allowed for
 * the given QP state transition.
 * @cur_state: Current QP state
 * @next_state: Next QP state
 * @type: QP type
 * @mask: Mask of supplied QP attributes
 *
 * This function is a helper function that a low-level driver's
 * modify_qp method can use to validate the consumer's input.  It
 * checks that cur_state and next_state are valid QP states, that a
 * transition from cur_state to next_state is allowed by the IB spec,
 * and that the attribute mask supplied is allowed for the transition.
 */
int ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
                       enum ib_qp_type type, enum ib_qp_attr_mask mask);
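
/*
 * Example (editorial addition): the shape of the validation a driver's
 * modify_qp method typically performs with ib_modify_qp_is_ok().  The
 * current-state handling here is simplified; a real driver tracks the QP's
 * actual state under a lock rather than assuming IB_QPS_RESET.
 */
#if 0
static int my_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
                        int attr_mask, struct ib_udata *udata)
{
        enum ib_qp_state cur_state, new_state;

        cur_state = attr_mask & IB_QP_CUR_STATE ?
                attr->cur_qp_state : IB_QPS_RESET; /* simplified */
        new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;

        if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
                                attr_mask))
                return -EINVAL;

        /* ...program the hardware for the state transition... */
        return 0;
}
#endif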
int ib_register_event_handler  (struct ib_event_handler *event_handler);
int ib_unregister_event_handler(struct ib_event_handler *event_handler);
void ib_dispatch_event(struct ib_event *event);

int ib_query_device(struct ib_device *device,
                    struct ib_device_attr *device_attr);

int ib_query_port(struct ib_device *device,
                  u8 port_num, struct ib_port_attr *port_attr);

int ib_query_gid(struct ib_device *device,
                 u8 port_num, int index, union ib_gid *gid);

int ib_query_pkey(struct ib_device *device,
                  u8 port_num, u16 index, u16 *pkey);

int ib_modify_device(struct ib_device *device,
                     int device_modify_mask,
                     struct ib_device_modify *device_modify);

int ib_modify_port(struct ib_device *device,
                   u8 port_num, int port_modify_mask,
                   struct ib_port_modify *port_modify);

int ib_find_gid(struct ib_device *device, union ib_gid *gid,
                u8 *port_num, u16 *index);

int ib_find_pkey(struct ib_device *device,
                 u8 port_num, u16 pkey, u16 *index);
/**
 * ib_alloc_pd - Allocates an unused protection domain.
 * @device: The device on which to allocate the protection domain.
 *
 * A protection domain object provides an association between QPs, shared
 * receive queues, address handles, memory regions, and memory windows.
 */
struct ib_pd *ib_alloc_pd(struct ib_device *device);

/**
 * ib_dealloc_pd - Deallocates a protection domain.
 * @pd: The protection domain to deallocate.
 */
int ib_dealloc_pd(struct ib_pd *pd);

/**
 * ib_create_ah - Creates an address handle for the given address vector.
 * @pd: The protection domain associated with the address handle.
 * @ah_attr: The attributes of the address vector.
 *
 * The address handle is used to reference a local or global destination
 * in all UD QP post sends.
 */
struct ib_ah *ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr);
/**
 * ib_init_ah_from_wc - Initializes address handle attributes from a
 *   work completion.
 * @device: Device on which the received message arrived.
 * @port_num: Port on which the received message arrived.
 * @wc: Work completion associated with the received message.
 * @grh: References the received global route header.  This parameter is
 *   ignored unless the work completion indicates that the GRH is valid.
 * @ah_attr: Returned attributes that can be used when creating an address
 *   handle for replying to the message.
 */
int ib_init_ah_from_wc(struct ib_device *device, u8 port_num, struct ib_wc *wc,
                       struct ib_grh *grh, struct ib_ah_attr *ah_attr);

/**
 * ib_create_ah_from_wc - Creates an address handle associated with the
 *   sender of the specified work completion.
 * @pd: The protection domain associated with the address handle.
 * @wc: Work completion information associated with a received message.
 * @grh: References the received global route header.  This parameter is
 *   ignored unless the work completion indicates that the GRH is valid.
 * @port_num: The outbound port number to associate with the address.
 *
 * The address handle is used to reference a local or global destination
 * in all UD QP post sends.
 */
struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, struct ib_wc *wc,
                                   struct ib_grh *grh, u8 port_num);

/**
 * ib_modify_ah - Modifies the address vector associated with an address
 *   handle.
 * @ah: The address handle to modify.
 * @ah_attr: The new address vector attributes to associate with the
 *   address handle.
 */
int ib_modify_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr);

/**
 * ib_query_ah - Queries the address vector associated with an address
 *   handle.
 * @ah: The address handle to query.
 * @ah_attr: The address vector attributes associated with the address
 *   handle.
 */
int ib_query_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr);

/**
 * ib_destroy_ah - Destroys an address handle.
 * @ah: The address handle to destroy.
 */
int ib_destroy_ah(struct ib_ah *ah);
/**
 * ib_create_srq - Creates a SRQ associated with the specified protection
 *   domain.
 * @pd: The protection domain associated with the SRQ.
 * @srq_init_attr: A list of initial attributes required to create the
 *   SRQ.  If SRQ creation succeeds, then the attributes are updated to
 *   the actual capabilities of the created SRQ.
 *
 * srq_attr->max_wr and srq_attr->max_sge are read to determine the
 * requested size of the SRQ, and set to the actual values allocated
 * on return.  If ib_create_srq() succeeds, then max_wr and max_sge
 * will always be at least as large as the requested values.
 */
struct ib_srq *ib_create_srq(struct ib_pd *pd,
                             struct ib_srq_init_attr *srq_init_attr);

/**
 * ib_modify_srq - Modifies the attributes for the specified SRQ.
 * @srq: The SRQ to modify.
 * @srq_attr: On input, specifies the SRQ attributes to modify.  On output,
 *   the current values of selected SRQ attributes are returned.
 * @srq_attr_mask: A bit-mask used to specify which attributes of the SRQ
 *   are being modified.
 *
 * The mask may contain IB_SRQ_MAX_WR to resize the SRQ and/or
 * IB_SRQ_LIMIT to set the SRQ's limit and request notification when
 * the number of receives queued drops below the limit.
 */
int ib_modify_srq(struct ib_srq *srq,
                  struct ib_srq_attr *srq_attr,
                  enum ib_srq_attr_mask srq_attr_mask);

/**
 * ib_query_srq - Returns the attribute list and current values for the
 *   specified SRQ.
 * @srq: The SRQ to query.
 * @srq_attr: The attributes of the specified SRQ.
 */
int ib_query_srq(struct ib_srq *srq,
                 struct ib_srq_attr *srq_attr);

/**
 * ib_destroy_srq - Destroys the specified SRQ.
 * @srq: The SRQ to destroy.
 */
int ib_destroy_srq(struct ib_srq *srq);

/**
 * ib_post_srq_recv - Posts a list of work requests to the specified SRQ.
 * @srq: The SRQ to post the work request on.
 * @recv_wr: A list of work requests to post on the receive queue.
 * @bad_recv_wr: On an immediate failure, this parameter will reference
 *   the work request that failed to be posted on the QP.
 */
static inline int ib_post_srq_recv(struct ib_srq *srq,
                                   struct ib_recv_wr *recv_wr,
                                   struct ib_recv_wr **bad_recv_wr)
{
        return srq->device->post_srq_recv(srq, recv_wr, bad_recv_wr);
}
/**
 * ib_create_qp - Creates a QP associated with the specified protection
 *   domain.
 * @pd: The protection domain associated with the QP.
 * @qp_init_attr: A list of initial attributes required to create the
 *   QP.  If QP creation succeeds, then the attributes are updated to
 *   the actual capabilities of the created QP.
 */
struct ib_qp *ib_create_qp(struct ib_pd *pd,
                           struct ib_qp_init_attr *qp_init_attr);
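
/*
 * Example (editorial addition): a minimal sketch of creating an RC QP once
 * a protection domain and completion queues exist.  The capability numbers
 * are arbitrary illustrative values, and my_create_rc_qp() is a
 * hypothetical name.
 */
#if 0
static struct ib_qp *my_create_rc_qp(struct ib_pd *pd,
                                     struct ib_cq *send_cq,
                                     struct ib_cq *recv_cq)
{
        struct ib_qp_init_attr init_attr = {
                .send_cq     = send_cq,
                .recv_cq     = recv_cq,
                .cap         = {
                        .max_send_wr  = 64,
                        .max_recv_wr  = 64,
                        .max_send_sge = 1,
                        .max_recv_sge = 1,
                },
                .sq_sig_type = IB_SIGNAL_REQ_WR, /* only flagged WRs complete */
                .qp_type     = IB_QPT_RC,
        };

        return ib_create_qp(pd, &init_attr);
}
#endif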
/**
 * ib_modify_qp - Modifies the attributes for the specified QP and then
 *   transitions the QP to the given state.
 * @qp: The QP to modify.
 * @qp_attr: On input, specifies the QP attributes to modify.  On output,
 *   the current values of selected QP attributes are returned.
 * @qp_attr_mask: A bit-mask used to specify which attributes of the QP
 *   are being modified.
 */
int ib_modify_qp(struct ib_qp *qp,
                 struct ib_qp_attr *qp_attr,
                 int qp_attr_mask);

/**
 * ib_query_qp - Returns the attribute list and current values for the
 *   specified QP.
 * @qp: The QP to query.
 * @qp_attr: The attributes of the specified QP.
 * @qp_attr_mask: A bit-mask used to select specific attributes to query.
 * @qp_init_attr: Additional attributes of the selected QP.
 *
 * The qp_attr_mask may be used to limit the query to gathering only the
 * selected attributes.
 */
int ib_query_qp(struct ib_qp *qp,
                struct ib_qp_attr *qp_attr,
                int qp_attr_mask,
                struct ib_qp_init_attr *qp_init_attr);

/**
 * ib_destroy_qp - Destroys the specified QP.
 * @qp: The QP to destroy.
 */
int ib_destroy_qp(struct ib_qp *qp);
/**
 * ib_post_send - Posts a list of work requests to the send queue of
 *   the specified QP.
 * @qp: The QP to post the work request on.
 * @send_wr: A list of work requests to post on the send queue.
 * @bad_send_wr: On an immediate failure, this parameter will reference
 *   the work request that failed to be posted on the QP.
 *
 * While IBA Vol. 1 section 11.4.1.1 specifies that if an immediate
 * error is returned, the QP state shall not be affected,
 * ib_post_send() will return an immediate error after queueing any
 * earlier work requests in the list.
 */
static inline int ib_post_send(struct ib_qp *qp,
                               struct ib_send_wr *send_wr,
                               struct ib_send_wr **bad_send_wr)
{
        return qp->device->post_send(qp, send_wr, bad_send_wr);
}
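
/*
 * Example (editorial addition): a minimal sketch of posting a single
 * signaled RDMA WRITE work request.  The local buffer is assumed to be
 * already registered and DMA-mapped; lkey, remote_addr and rkey come from
 * that setup.  my_post_rdma_write() is a hypothetical name.
 */
#if 0
static int my_post_rdma_write(struct ib_qp *qp, u64 local_addr, u32 lkey,
                              u32 length, u64 remote_addr, u32 rkey)
{
        struct ib_sge sge;
        struct ib_send_wr wr, *bad_wr;

        sge.addr   = local_addr;
        sge.length = length;
        sge.lkey   = lkey;

        memset(&wr, 0, sizeof wr);
        wr.wr_id      = 1;      /* echoed back in the ib_wc */
        wr.sg_list    = &sge;
        wr.num_sge    = 1;
        wr.opcode     = IB_WR_RDMA_WRITE;
        wr.send_flags = IB_SEND_SIGNALED;
        wr.wr.rdma.remote_addr = remote_addr;
        wr.wr.rdma.rkey        = rkey;

        return ib_post_send(qp, &wr, &bad_wr);
}
#endif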
/**
 * ib_post_recv - Posts a list of work requests to the receive queue of
 *   the specified QP.
 * @qp: The QP to post the work request on.
 * @recv_wr: A list of work requests to post on the receive queue.
 * @bad_recv_wr: On an immediate failure, this parameter will reference
 *   the work request that failed to be posted on the QP.
 */
static inline int ib_post_recv(struct ib_qp *qp,
                               struct ib_recv_wr *recv_wr,
                               struct ib_recv_wr **bad_recv_wr)
{
        return qp->device->post_recv(qp, recv_wr, bad_recv_wr);
}
/**
 * ib_create_cq - Creates a CQ on the specified device.
 * @device: The device on which to create the CQ.
 * @comp_handler: A user-specified callback that is invoked when a
 *   completion event occurs on the CQ.
 * @event_handler: A user-specified callback that is invoked when an
 *   asynchronous event not associated with a completion occurs on the CQ.
 * @cq_context: Context associated with the CQ returned to the user via
 *   the associated completion and event handlers.
 * @cqe: The minimum size of the CQ.
 * @comp_vector - Completion vector used to signal completion events.
 *   Must be >= 0 and < context->num_comp_vectors.
 *
 * Users can examine the cq structure to determine the actual CQ size.
 */
struct ib_cq *ib_create_cq(struct ib_device *device,
                           ib_comp_handler comp_handler,
                           void (*event_handler)(struct ib_event *, void *),
                           void *cq_context, int cqe, int comp_vector);

/**
 * ib_resize_cq - Modifies the capacity of the CQ.
 * @cq: The CQ to resize.
 * @cqe: The minimum size of the CQ.
 *
 * Users can examine the cq structure to determine the actual CQ size.
 */
int ib_resize_cq(struct ib_cq *cq, int cqe);

/**
 * ib_modify_cq - Modifies moderation params of the CQ
 * @cq: The CQ to modify.
 * @cq_count: number of CQEs that will trigger an event
 * @cq_period: max period of time in usec before triggering an event
 */
int ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period);

/**
 * ib_destroy_cq - Destroys the specified CQ.
 * @cq: The CQ to destroy.
 */
int ib_destroy_cq(struct ib_cq *cq);
/**
 * ib_poll_cq - poll a CQ for completion(s)
 * @cq: the CQ being polled
 * @num_entries: maximum number of completions to return
 * @wc: array of at least @num_entries &struct ib_wc where completions
 *   will be returned
 *
 * Poll a CQ for (possibly multiple) completions.  If the return value
 * is < 0, an error occurred.  If the return value is >= 0, it is the
 * number of completions returned.  If the return value is
 * non-negative and < num_entries, then the CQ was emptied.
 */
static inline int ib_poll_cq(struct ib_cq *cq, int num_entries,
                             struct ib_wc *wc)
{
        return cq->device->poll_cq(cq, num_entries, wc);
}
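
/*
 * Example (editorial addition): draining a CQ with ib_poll_cq().  Batching
 * several entries per call amortizes the indirect call into the driver.
 * my_drain_cq() is a hypothetical name.
 */
#if 0
static void my_drain_cq(struct ib_cq *cq)
{
        struct ib_wc wc[16];
        int n, i;

        while ((n = ib_poll_cq(cq, ARRAY_SIZE(wc), wc)) > 0)
                for (i = 0; i < n; ++i)
                        if (wc[i].status != IB_WC_SUCCESS)
                                printk(KERN_ERR "wr_id %llu failed: %d\n",
                                       (unsigned long long) wc[i].wr_id,
                                       wc[i].status);
}
#endif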
/**
 * ib_peek_cq - Returns the number of unreaped completions currently
 *   on the specified CQ.
 * @cq: The CQ to peek.
 * @wc_cnt: A minimum number of unreaped completions to check for.
 *
 * If the number of unreaped completions is greater than or equal to wc_cnt,
 * this function returns wc_cnt, otherwise, it returns the actual number of
 * unreaped completions.
 */
int ib_peek_cq(struct ib_cq *cq, int wc_cnt);

/**
 * ib_req_notify_cq - Request completion notification on a CQ.
 * @cq: The CQ to generate an event for.
 * @flags:
 *   Must contain exactly one of %IB_CQ_SOLICITED or %IB_CQ_NEXT_COMP
 *   to request an event on the next solicited event or next work
 *   completion of any type, respectively.  %IB_CQ_REPORT_MISSED_EVENTS
 *   may also be |ed in to request a hint about missed events, as
 *   described below.
 *
 * Return Value:
 *    < 0 means an error occurred while requesting notification
 *   == 0 means notification was requested successfully, and if
 *        IB_CQ_REPORT_MISSED_EVENTS was passed in, then no events
 *        were missed and it is safe to wait for another event.  In
 *        this case it is guaranteed that any work completions added
 *        to the CQ since the last CQ poll will trigger a completion
 *        notification event.
 *    > 0 is only returned if IB_CQ_REPORT_MISSED_EVENTS was passed
 *        in.  It means that the consumer must poll the CQ again to
 *        make sure it is empty to avoid missing an event because of a
 *        race between requesting notification and an entry being
 *        added to the CQ.  This return value means it is possible
 *        (but not guaranteed) that a work completion has been added
 *        to the CQ since the last poll without triggering a
 *        completion notification event.
 */
static inline int ib_req_notify_cq(struct ib_cq *cq,
                                   enum ib_cq_notify_flags flags)
{
        return cq->device->req_notify_cq(cq, flags);
}
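
/*
 * Example (editorial addition): the canonical race-free re-arm loop built
 * on IB_CQ_REPORT_MISSED_EVENTS, as called from a completion handler.
 * my_handle_wc() is a hypothetical per-completion consumer.
 */
#if 0
static void my_cq_event(struct ib_cq *cq, void *cq_context)
{
        struct ib_wc wc;

        do {
                while (ib_poll_cq(cq, 1, &wc) > 0)
                        my_handle_wc(&wc);
                /*
                 * A positive return means a completion may have slipped in
                 * between the final poll and the re-arm: poll again.
                 */
        } while (ib_req_notify_cq(cq, IB_CQ_NEXT_COMP |
                                      IB_CQ_REPORT_MISSED_EVENTS) > 0);
}
#endif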
/**
 * ib_req_ncomp_notif - Request completion notification when there are
 *   at least the specified number of unreaped completions on the CQ.
 * @cq: The CQ to generate an event for.
 * @wc_cnt: The number of unreaped completions that should be on the
 *   CQ before an event is generated.
 */
static inline int ib_req_ncomp_notif(struct ib_cq *cq, int wc_cnt)
{
        return cq->device->req_ncomp_notif ?
                cq->device->req_ncomp_notif(cq, wc_cnt) :
                -ENOSYS;
}

/**
 * ib_get_dma_mr - Returns a memory region for system memory that is
 *   usable for DMA.
 * @pd: The protection domain associated with the memory region.
 * @mr_access_flags: Specifies the memory access rights.
 *
 * Note that the ib_dma_*() functions defined below must be used
 * to create/destroy addresses used with the Lkey or Rkey returned
 * by ib_get_dma_mr().
 */
struct ib_mr *ib_get_dma_mr(struct ib_pd *pd, int mr_access_flags);
/**
 * ib_dma_mapping_error - check a DMA addr for error
 * @dev: The device for which the dma_addr was created
 * @dma_addr: The DMA address to check
 */
static inline int ib_dma_mapping_error(struct ib_device *dev, u64 dma_addr)
{
        if (dev->dma_ops)
                return dev->dma_ops->mapping_error(dev, dma_addr);
        return dma_mapping_error(dev->dma_device, dma_addr);
}

/**
 * ib_dma_map_single - Map a kernel virtual address to DMA address
 * @dev: The device for which the dma_addr is to be created
 * @cpu_addr: The kernel virtual address
 * @size: The size of the region in bytes
 * @direction: The direction of the DMA
 */
static inline u64 ib_dma_map_single(struct ib_device *dev,
                                    void *cpu_addr, size_t size,
                                    enum dma_data_direction direction)
{
        if (dev->dma_ops)
                return dev->dma_ops->map_single(dev, cpu_addr, size, direction);
        return dma_map_single(dev->dma_device, cpu_addr, size, direction);
}
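
/*
 * Example (editorial addition): mapping a kernel buffer for DMA and
 * describing it with an ib_sge.  The lkey is assumed to come from the
 * device's local_dma_lkey or from an MR returned by ib_get_dma_mr().
 * my_map_buffer() is a hypothetical name.
 */
#if 0
static int my_map_buffer(struct ib_device *dev, void *buf, size_t size,
                         u32 lkey, struct ib_sge *sge)
{
        u64 dma_addr;

        dma_addr = ib_dma_map_single(dev, buf, size, DMA_TO_DEVICE);
        if (ib_dma_mapping_error(dev, dma_addr))
                return -ENOMEM;

        sge->addr   = dma_addr;
        sge->length = size;
        sge->lkey   = lkey;
        return 0;
}
#endif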
/**
 * ib_dma_unmap_single - Destroy a mapping created by ib_dma_map_single()
 * @dev: The device for which the DMA address was created
 * @addr: The DMA address
 * @size: The size of the region in bytes
 * @direction: The direction of the DMA
 */
static inline void ib_dma_unmap_single(struct ib_device *dev,
                                       u64 addr, size_t size,
                                       enum dma_data_direction direction)
{
        if (dev->dma_ops)
                dev->dma_ops->unmap_single(dev, addr, size, direction);
        else
                dma_unmap_single(dev->dma_device, addr, size, direction);
}
static inline u64 ib_dma_map_single_attrs(struct ib_device *dev,
                                          void *cpu_addr, size_t size,
                                          enum dma_data_direction direction,
                                          struct dma_attrs *attrs)
{
        return dma_map_single_attrs(dev->dma_device, cpu_addr, size,
                                    direction, attrs);
}

static inline void ib_dma_unmap_single_attrs(struct ib_device *dev,
                                             u64 addr, size_t size,
                                             enum dma_data_direction direction,
                                             struct dma_attrs *attrs)
{
        dma_unmap_single_attrs(dev->dma_device, addr, size,
                               direction, attrs);
}
/**
 * ib_dma_map_page - Map a physical page to DMA address
 * @dev: The device for which the dma_addr is to be created
 * @page: The page to be mapped
 * @offset: The offset within the page
 * @size: The size of the region in bytes
 * @direction: The direction of the DMA
 */
static inline u64 ib_dma_map_page(struct ib_device *dev,
                                  struct page *page,
                                  unsigned long offset,
                                  size_t size,
                                  enum dma_data_direction direction)
{
        if (dev->dma_ops)
                return dev->dma_ops->map_page(dev, page, offset, size, direction);
        return dma_map_page(dev->dma_device, page, offset, size, direction);
}

/**
 * ib_dma_unmap_page - Destroy a mapping created by ib_dma_map_page()
 * @dev: The device for which the DMA address was created
 * @addr: The DMA address
 * @size: The size of the region in bytes
 * @direction: The direction of the DMA
 */
static inline void ib_dma_unmap_page(struct ib_device *dev,
                                     u64 addr, size_t size,
                                     enum dma_data_direction direction)
{
        if (dev->dma_ops)
                dev->dma_ops->unmap_page(dev, addr, size, direction);
        else
                dma_unmap_page(dev->dma_device, addr, size, direction);
}
/**
 * ib_dma_map_sg - Map a scatter/gather list to DMA addresses
 * @dev: The device for which the DMA addresses are to be created
 * @sg: The array of scatter/gather entries
 * @nents: The number of scatter/gather entries
 * @direction: The direction of the DMA
 */
static inline int ib_dma_map_sg(struct ib_device *dev,
                                struct scatterlist *sg, int nents,
                                enum dma_data_direction direction)
{
        if (dev->dma_ops)
                return dev->dma_ops->map_sg(dev, sg, nents, direction);
        return dma_map_sg(dev->dma_device, sg, nents, direction);
}

/**
 * ib_dma_unmap_sg - Unmap a scatter/gather list of DMA addresses
 * @dev: The device for which the DMA addresses were created
 * @sg: The array of scatter/gather entries
 * @nents: The number of scatter/gather entries
 * @direction: The direction of the DMA
 */
static inline void ib_dma_unmap_sg(struct ib_device *dev,
                                   struct scatterlist *sg, int nents,
                                   enum dma_data_direction direction)
{
        if (dev->dma_ops)
                dev->dma_ops->unmap_sg(dev, sg, nents, direction);
        else
                dma_unmap_sg(dev->dma_device, sg, nents, direction);
}

static inline int ib_dma_map_sg_attrs(struct ib_device *dev,
                                      struct scatterlist *sg, int nents,
                                      enum dma_data_direction direction,
                                      struct dma_attrs *attrs)
{
        return dma_map_sg_attrs(dev->dma_device, sg, nents, direction, attrs);
}

static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev,
                                         struct scatterlist *sg, int nents,
                                         enum dma_data_direction direction,
                                         struct dma_attrs *attrs)
{
        dma_unmap_sg_attrs(dev->dma_device, sg, nents, direction, attrs);
}
/**
 * ib_sg_dma_address - Return the DMA address from a scatter/gather entry
 * @dev: The device for which the DMA addresses were created
 * @sg: The scatter/gather entry
 */
static inline u64 ib_sg_dma_address(struct ib_device *dev,
                                    struct scatterlist *sg)
{
        if (dev->dma_ops)
                return dev->dma_ops->dma_address(dev, sg);
        return sg_dma_address(sg);
}

/**
 * ib_sg_dma_len - Return the DMA length from a scatter/gather entry
 * @dev: The device for which the DMA addresses were created
 * @sg: The scatter/gather entry
 */
static inline unsigned int ib_sg_dma_len(struct ib_device *dev,
                                         struct scatterlist *sg)
{
        if (dev->dma_ops)
                return dev->dma_ops->dma_len(dev, sg);
        return sg_dma_len(sg);
}
/**
 * ib_dma_sync_single_for_cpu - Prepare DMA region to be accessed by CPU
 * @dev: The device for which the DMA address was created
 * @addr: The DMA address
 * @size: The size of the region in bytes
 * @dir: The direction of the DMA
 */
static inline void ib_dma_sync_single_for_cpu(struct ib_device *dev,
                                              u64 addr,
                                              size_t size,
                                              enum dma_data_direction dir)
{
        if (dev->dma_ops)
                dev->dma_ops->sync_single_for_cpu(dev, addr, size, dir);
        else
                dma_sync_single_for_cpu(dev->dma_device, addr, size, dir);
}

/**
 * ib_dma_sync_single_for_device - Prepare DMA region to be accessed by device
 * @dev: The device for which the DMA address was created
 * @addr: The DMA address
 * @size: The size of the region in bytes
 * @dir: The direction of the DMA
 */
static inline void ib_dma_sync_single_for_device(struct ib_device *dev,
                                                 u64 addr,
                                                 size_t size,
                                                 enum dma_data_direction dir)
{
        if (dev->dma_ops)
                dev->dma_ops->sync_single_for_device(dev, addr, size, dir);
        else
                dma_sync_single_for_device(dev->dma_device, addr, size, dir);
}
/**
 * ib_dma_alloc_coherent - Allocate memory and map it for DMA
 * @dev: The device for which the DMA address is requested
 * @size: The size of the region to allocate in bytes
 * @dma_handle: A pointer for returning the DMA address of the region
 * @flag: memory allocator flags
 */
static inline void *ib_dma_alloc_coherent(struct ib_device *dev,
                                          size_t size,
                                          u64 *dma_handle,
                                          gfp_t flag)
{
        if (dev->dma_ops)
                return dev->dma_ops->alloc_coherent(dev, size, dma_handle, flag);
        else {
                dma_addr_t handle;
                void *ret;

                ret = dma_alloc_coherent(dev->dma_device, size, &handle, flag);
                *dma_handle = handle;
                return ret;
        }
}

/**
 * ib_dma_free_coherent - Free memory allocated by ib_dma_alloc_coherent()
 * @dev: The device for which the DMA addresses were allocated
 * @size: The size of the region
 * @cpu_addr: the address returned by ib_dma_alloc_coherent()
 * @dma_handle: the DMA address returned by ib_dma_alloc_coherent()
 */
static inline void ib_dma_free_coherent(struct ib_device *dev,
                                        size_t size, void *cpu_addr,
                                        u64 dma_handle)
{
        if (dev->dma_ops)
                dev->dma_ops->free_coherent(dev, size, cpu_addr, dma_handle);
        else
                dma_free_coherent(dev->dma_device, size, cpu_addr, dma_handle);
}
/**
 * ib_reg_phys_mr - Prepares a virtually addressed memory region for use
 *   by an HCA.
 * @pd: The protection domain assigned to the registered region.
 * @phys_buf_array: Specifies a list of physical buffers to use in the
 *   memory region.
 * @num_phys_buf: Specifies the size of the phys_buf_array.
 * @mr_access_flags: Specifies the memory access rights.
 * @iova_start: The offset of the region's starting I/O virtual address.
 */
struct ib_mr *ib_reg_phys_mr(struct ib_pd *pd,
                             struct ib_phys_buf *phys_buf_array,
                             int num_phys_buf,
                             int mr_access_flags,
                             u64 *iova_start);
/**
 * ib_rereg_phys_mr - Modifies the attributes of an existing memory region.
 *   Conceptually, this call performs the functions of a deregister memory
 *   region followed by a register physical memory region.  Where possible,
 *   resources are reused instead of deallocated and reallocated.
 * @mr: The memory region to modify.
 * @mr_rereg_mask: A bit-mask used to indicate which of the following
 *   properties of the memory region are being modified.
 * @pd: If %IB_MR_REREG_PD is set in mr_rereg_mask, this field specifies
 *   the new protection domain to associate with the memory region,
 *   otherwise, this parameter is ignored.
 * @phys_buf_array: If %IB_MR_REREG_TRANS is set in mr_rereg_mask, this
 *   field specifies a list of physical buffers to use in the new
 *   translation, otherwise, this parameter is ignored.
 * @num_phys_buf: If %IB_MR_REREG_TRANS is set in mr_rereg_mask, this
 *   field specifies the size of the phys_buf_array, otherwise, this
 *   parameter is ignored.
 * @mr_access_flags: If %IB_MR_REREG_ACCESS is set in mr_rereg_mask, this
 *   field specifies the new memory access rights, otherwise, this
 *   parameter is ignored.
 * @iova_start: The offset of the region's starting I/O virtual address.
 */
int ib_rereg_phys_mr(struct ib_mr *mr,
                     int mr_rereg_mask,
                     struct ib_pd *pd,
                     struct ib_phys_buf *phys_buf_array,
                     int num_phys_buf,
                     int mr_access_flags,
                     u64 *iova_start);
/**
 * ib_query_mr - Retrieves information about a specific memory region.
 * @mr: The memory region to retrieve information about.
 * @mr_attr: The attributes of the specified memory region.
 */
int ib_query_mr(struct ib_mr *mr, struct ib_mr_attr *mr_attr);

/**
 * ib_dereg_mr - Deregisters a memory region and removes it from the
 *   HCA translation table.
 * @mr: The memory region to deregister.
 */
int ib_dereg_mr(struct ib_mr *mr);

/**
 * ib_alloc_fast_reg_mr - Allocates memory region usable with the
 *   IB_WR_FAST_REG_MR send work request.
 * @pd: The protection domain associated with the region.
 * @max_page_list_len: requested max physical buffer list length to be
 *   used with fast register work requests for this MR.
 */
struct ib_mr *ib_alloc_fast_reg_mr(struct ib_pd *pd, int max_page_list_len);
/**
 * ib_alloc_fast_reg_page_list - Allocates a page list array
 * @device - ib device pointer.
 * @page_list_len - size of the page list array to be allocated.
 *
 * This allocates and returns a struct ib_fast_reg_page_list * and a
 * page_list array that is at least page_list_len in size.  The actual
 * size is returned in max_page_list_len.  The caller is responsible
 * for initializing the contents of the page_list array before posting
 * a send work request with the IB_WR_FAST_REG_MR opcode.
 *
 * The page_list array entries must be translated using one of the
 * ib_dma_*() functions just like the addresses passed to
 * ib_map_phys_fmr().  Once the ib_post_send() is issued, the struct
 * ib_fast_reg_page_list must not be modified by the caller until the
 * IB_WR_FAST_REG_MR work request completes.
 */
struct ib_fast_reg_page_list *ib_alloc_fast_reg_page_list(
                        struct ib_device *device, int page_list_len);

/**
 * ib_free_fast_reg_page_list - Deallocates a previously allocated
 *   page list array.
 * @page_list - struct ib_fast_reg_page_list pointer to be deallocated.
 */
void ib_free_fast_reg_page_list(struct ib_fast_reg_page_list *page_list);

/**
 * ib_update_fast_reg_key - updates the key portion of the fast_reg MR
 *   R_Key and L_Key.
 * @mr - struct ib_mr pointer to be updated.
 * @newkey - new key to be used.
 */
static inline void ib_update_fast_reg_key(struct ib_mr *mr, u8 newkey)
{
        mr->lkey = (mr->lkey & 0xffffff00) | newkey;
        mr->rkey = (mr->rkey & 0xffffff00) | newkey;
}
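
/*
 * Example (editorial addition): a rough sketch of building an
 * IB_WR_FAST_REG_MR work request from an MR allocated with
 * ib_alloc_fast_reg_mr() and a page list from
 * ib_alloc_fast_reg_page_list().  The caller is assumed to have filled
 * frpl->page_list[] with DMA addresses; my_fast_reg() is a hypothetical
 * name and the key rotation is simplified.
 */
#if 0
static int my_fast_reg(struct ib_qp *qp, struct ib_mr *mr,
                       struct ib_fast_reg_page_list *frpl,
                       int npages, u64 iova, u32 length)
{
        struct ib_send_wr wr, *bad_wr;

        /* Rotate the low byte of the keys so stale remote references fail. */
        ib_update_fast_reg_key(mr, (u8) (mr->rkey + 1));

        memset(&wr, 0, sizeof wr);
        wr.opcode                    = IB_WR_FAST_REG_MR;
        wr.wr_id                     = 2;
        wr.send_flags                = IB_SEND_SIGNALED;
        wr.wr.fast_reg.iova_start    = iova;
        wr.wr.fast_reg.page_list     = frpl;
        wr.wr.fast_reg.page_shift    = PAGE_SHIFT;
        wr.wr.fast_reg.page_list_len = npages;
        wr.wr.fast_reg.length        = length;
        wr.wr.fast_reg.access_flags  = IB_ACCESS_LOCAL_WRITE |
                                       IB_ACCESS_REMOTE_READ;
        wr.wr.fast_reg.rkey          = mr->rkey;

        return ib_post_send(qp, &wr, &bad_wr);
}
#endif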
/**
 * ib_alloc_mw - Allocates a memory window.
 * @pd: The protection domain associated with the memory window.
 */
struct ib_mw *ib_alloc_mw(struct ib_pd *pd);

/**
 * ib_bind_mw - Posts a work request to the send queue of the specified
 *   QP, which binds the memory window to the given address range and
 *   remote access attributes.
 * @qp: QP to post the bind work request on.
 * @mw: The memory window to bind.
 * @mw_bind: Specifies information about the memory window, including
 *   its address range, remote access rights, and associated memory region.
 */
static inline int ib_bind_mw(struct ib_qp *qp,
                             struct ib_mw *mw,
                             struct ib_mw_bind *mw_bind)
{
        /* XXX reference counting in corresponding MR? */
        return mw->device->bind_mw ?
                mw->device->bind_mw(qp, mw, mw_bind) :
                -ENOSYS;
}

/**
 * ib_dealloc_mw - Deallocates a memory window.
 * @mw: The memory window to deallocate.
 */
int ib_dealloc_mw(struct ib_mw *mw);
/**
 * ib_alloc_fmr - Allocates an unmapped fast memory region.
 * @pd: The protection domain associated with the unmapped region.
 * @mr_access_flags: Specifies the memory access rights.
 * @fmr_attr: Attributes of the unmapped region.
 *
 * A fast memory region must be mapped before it can be used as part of
 * a work request.
 */
struct ib_fmr *ib_alloc_fmr(struct ib_pd *pd,
                            int mr_access_flags,
                            struct ib_fmr_attr *fmr_attr);

/**
 * ib_map_phys_fmr - Maps a list of physical pages to a fast memory region.
 * @fmr: The fast memory region to associate with the pages.
 * @page_list: An array of physical pages to map to the fast memory region.
 * @list_len: The number of pages in page_list.
 * @iova: The I/O virtual address to use with the mapped region.
 */
static inline int ib_map_phys_fmr(struct ib_fmr *fmr,
                                  u64 *page_list, int list_len,
                                  u64 iova)
{
        return fmr->device->map_phys_fmr(fmr, page_list, list_len, iova);
}
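
/*
 * Example (editorial addition): the FMR map/unmap lifecycle in outline.
 * The page addresses are assumed to come from the ib_dma_*() mapping
 * functions; my_use_fmr() is a hypothetical name.
 */
#if 0
static int my_use_fmr(struct ib_fmr *fmr, u64 *dma_pages, int npages,
                      u64 iova)
{
        LIST_HEAD(fmr_list);
        int ret;

        ret = ib_map_phys_fmr(fmr, dma_pages, npages, iova);
        if (ret)
                return ret;

        /* ...issue work requests using fmr->lkey / fmr->rkey... */

        list_add_tail(&fmr->list, &fmr_list);
        return ib_unmap_fmr(&fmr_list);
}
#endif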
/**
 * ib_unmap_fmr - Removes the mapping from a list of fast memory regions.
 * @fmr_list: A linked list of fast memory regions to unmap.
 */
int ib_unmap_fmr(struct list_head *fmr_list);

/**
 * ib_dealloc_fmr - Deallocates a fast memory region.
 * @fmr: The fast memory region to deallocate.
 */
int ib_dealloc_fmr(struct ib_fmr *fmr);

/**
 * ib_attach_mcast - Attaches the specified QP to a multicast group.
 * @qp: QP to attach to the multicast group.  The QP must be type
 *   IB_QPT_UD.
 * @gid: Multicast group GID.
 * @lid: Multicast group LID in host byte order.
 *
 * In order to send and receive multicast packets, subnet
 * administration must have created the multicast group and configured
 * the fabric appropriately.  The port associated with the specified
 * QP must also be a member of the multicast group.
 */
int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);

/**
 * ib_detach_mcast - Detaches the specified QP from a multicast group.
 * @qp: QP to detach from the multicast group.
 * @gid: Multicast group GID.
 * @lid: Multicast group LID in host byte order.
 */
int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);

#endif /* IB_VERBS_H */