/*
 * Copyright (c) 2012-2016 VMware, Inc.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of EITHER the GNU General Public License
 * version 2 as published by the Free Software Foundation or the BSD
 * 2-Clause License. This program is distributed in the hope that it
 * will be useful, but WITHOUT ANY WARRANTY; WITHOUT EVEN THE IMPLIED
 * WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License version 2 for more details at
 * http://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program available in the file COPYING in the main
 * directory of this source tree.
 *
 * The BSD 2-Clause License
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
 * OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#ifndef __PVRDMA_DEV_API_H__
#define __PVRDMA_DEV_API_H__

#include <linux/types.h>

#include "pvrdma_verbs.h"
/*
 * PVRDMA version macros. Some new features require updates to PVRDMA_VERSION.
 * These macros allow us to check for different features if necessary.
 */

#define PVRDMA_ROCEV1_VERSION		17
#define PVRDMA_ROCEV2_VERSION		18
#define PVRDMA_VERSION			PVRDMA_ROCEV2_VERSION

#define PVRDMA_BOARD_ID			1
#define PVRDMA_REV_ID			1
/*
 * Masks and accessors for page directory, which is a two-level lookup:
 * page directory -> page table -> page. Only one directory for now, but we
 * could expand that easily. 9 bits for tables, 9 bits for pages, gives one
 * gigabyte for memory regions and so forth.
 */

#define PVRDMA_PDIR_SHIFT		18
#define PVRDMA_PTABLE_SHIFT		9
#define PVRDMA_PAGE_DIR_DIR(x)		(((x) >> PVRDMA_PDIR_SHIFT) & 0x1)
#define PVRDMA_PAGE_DIR_TABLE(x)	(((x) >> PVRDMA_PTABLE_SHIFT) & 0x1ff)
#define PVRDMA_PAGE_DIR_PAGE(x)		((x) & 0x1ff)
#define PVRDMA_PAGE_DIR_MAX_PAGES	(1 * 512 * 512)
#define PVRDMA_MAX_FAST_REG_PAGES	128
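
/*
 * Illustrative sketch (not part of the device interface): decomposing a
 * linear page index into page-directory coordinates with the accessors
 * above. With one directory, 512 tables and 512 pages per table, the
 * directory addresses at most 1 * 512 * 512 = 262144 pages, i.e. 1 GiB
 * with 4 KiB pages. The variable names below are hypothetical.
 *
 *	u64 idx = 0x2a5b7;			// some index < PVRDMA_PAGE_DIR_MAX_PAGES
 *	u32 dir = PVRDMA_PAGE_DIR_DIR(idx);	// bit 18: 0 (only one directory today)
 *	u32 tbl = PVRDMA_PAGE_DIR_TABLE(idx);	// bits 17..9: 0x152
 *	u32 pg  = PVRDMA_PAGE_DIR_PAGE(idx);	// bits 8..0:  0x1b7
 */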
/*
 * Max MSI-X vectors.
 */

#define PVRDMA_MAX_INTERRUPTS	3
/* Register offsets within PCI resource on BAR1. */
#define PVRDMA_REG_VERSION	0x00	/* R: Version of device. */
#define PVRDMA_REG_DSRLOW	0x04	/* W: Device shared region low PA. */
#define PVRDMA_REG_DSRHIGH	0x08	/* W: Device shared region high PA. */
#define PVRDMA_REG_CTL		0x0c	/* W: PVRDMA_DEVICE_CTL */
#define PVRDMA_REG_REQUEST	0x10	/* W: Indicate device request. */
#define PVRDMA_REG_ERR		0x14	/* R: Device error. */
#define PVRDMA_REG_ICR		0x18	/* R: Interrupt cause. */
#define PVRDMA_REG_IMR		0x1c	/* R/W: Interrupt mask. */
#define PVRDMA_REG_MACL		0x20	/* R/W: MAC address low. */
#define PVRDMA_REG_MACH		0x24	/* R/W: MAC address high. */
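
/*
 * Illustrative sketch, not a definitive driver snippet: accessing BAR1
 * registers through a hypothetical ioremap()'d base pointer 'regs'. The
 * version check and the interrupt-mask value written to PVRDMA_REG_IMR are
 * assumptions for the example only.
 *
 *	void __iomem *regs;			// e.g. pci_iomap(pdev, PVRDMA_PCI_RESOURCE_REG, 0)
 *	u32 version = readl(regs + PVRDMA_REG_VERSION);
 *
 *	if (version < PVRDMA_ROCEV1_VERSION)
 *		return -ENODEV;			// device too old for this driver
 *	writel(0, regs + PVRDMA_REG_IMR);	// assumed polarity: 0 = no causes masked
 */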
/* Object flags. */
#define PVRDMA_CQ_FLAG_ARMED_SOL	BIT(0)	/* Armed for solicited-only. */
#define PVRDMA_CQ_FLAG_ARMED		BIT(1)	/* Armed. */
#define PVRDMA_MR_FLAG_DMA		BIT(0)	/* DMA region. */
#define PVRDMA_MR_FLAG_FRMR		BIT(1)	/* Fast reg memory region. */
/*
 * Atomic operation capability (masked versions are extended atomic
 * operations).
 */

#define PVRDMA_ATOMIC_OP_COMP_SWAP	BIT(0)	/* Compare and swap. */
#define PVRDMA_ATOMIC_OP_FETCH_ADD	BIT(1)	/* Fetch and add. */
#define PVRDMA_ATOMIC_OP_MASK_COMP_SWAP	BIT(2)	/* Masked compare and swap. */
#define PVRDMA_ATOMIC_OP_MASK_FETCH_ADD	BIT(3)	/* Masked fetch and add. */
/*
 * Base Memory Management Extension flags to support Fast Reg Memory Regions
 * and Fast Reg Work Requests. Each flag represents a verb operation and we
 * must support all of them to qualify for the BMME device cap.
 */

#define PVRDMA_BMME_FLAG_LOCAL_INV	BIT(0)	/* Local Invalidate. */
#define PVRDMA_BMME_FLAG_REMOTE_INV	BIT(1)	/* Remote Invalidate. */
#define PVRDMA_BMME_FLAG_FAST_REG_WR	BIT(2)	/* Fast Reg Work Request. */
/*
 * GID types. The interpretation of the gid_types bit field in the device
 * capabilities will depend on the device mode. For now, the device only
 * supports RoCE as mode, so only the different GID types for RoCE are
 * defined.
 */

#define PVRDMA_GID_TYPE_FLAG_ROCE_V1	BIT(0)
#define PVRDMA_GID_TYPE_FLAG_ROCE_V2	BIT(1)
/*
 * Version checks. This checks whether each version supports specific
 * capabilities from the device.
 */

#define PVRDMA_IS_VERSION17(_dev)					\
	(_dev->dsr_version == PVRDMA_ROCEV1_VERSION &&			\
	 _dev->dsr->caps.gid_types == PVRDMA_GID_TYPE_FLAG_ROCE_V1)

#define PVRDMA_IS_VERSION18(_dev)					\
	(_dev->dsr_version >= PVRDMA_ROCEV2_VERSION &&			\
	 (_dev->dsr->caps.gid_types == PVRDMA_GID_TYPE_FLAG_ROCE_V1 ||	\
	  _dev->dsr->caps.gid_types == PVRDMA_GID_TYPE_FLAG_ROCE_V2))

#define PVRDMA_SUPPORTED(_dev)						\
	((_dev->dsr->caps.mode == PVRDMA_DEVICE_MODE_ROCE) &&		\
	 (PVRDMA_IS_VERSION17(_dev) || PVRDMA_IS_VERSION18(_dev)))

/*
 * Get capability values based on device version.
 */

#define PVRDMA_GET_CAP(_dev, _old_val, _val) \
	((PVRDMA_IS_VERSION18(_dev)) ? _val : _old_val)
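
/*
 * Illustrative sketch of the version-gated capability lookup above; the
 * surrounding 'dev'/'props' names are hypothetical. The point is that
 * version-18 devices report the value from the shared-region caps, while
 * older devices fall back to a driver-side default.
 *
 *	props->max_fast_reg_page_list_len =
 *		PVRDMA_GET_CAP(dev, PVRDMA_MAX_FAST_REG_PAGES,
 *			       dev->dsr->caps.max_fast_reg_page_list_len);
 */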
enum pvrdma_pci_resource {
	PVRDMA_PCI_RESOURCE_MSIX,	/* BAR0: MSI-X, MMIO. */
	PVRDMA_PCI_RESOURCE_REG,	/* BAR1: Registers, MMIO. */
	PVRDMA_PCI_RESOURCE_UAR,	/* BAR2: UAR pages, MMIO, 64-bit. */
	PVRDMA_PCI_RESOURCE_LAST,	/* Last. */
};

enum pvrdma_device_ctl {
	PVRDMA_DEVICE_CTL_ACTIVATE,	/* Activate device. */
	PVRDMA_DEVICE_CTL_UNQUIESCE,	/* Unquiesce device. */
	PVRDMA_DEVICE_CTL_RESET,	/* Reset device. */
};

enum pvrdma_intr_vector {
	PVRDMA_INTR_VECTOR_RESPONSE,	/* Command response. */
	PVRDMA_INTR_VECTOR_ASYNC,	/* Async events. */
	PVRDMA_INTR_VECTOR_CQ,		/* CQ notification. */
	/* Additional CQ notification vectors. */
};

enum pvrdma_intr_cause {
	PVRDMA_INTR_CAUSE_RESPONSE	= (1 << PVRDMA_INTR_VECTOR_RESPONSE),
	PVRDMA_INTR_CAUSE_ASYNC		= (1 << PVRDMA_INTR_VECTOR_ASYNC),
	PVRDMA_INTR_CAUSE_CQ		= (1 << PVRDMA_INTR_VECTOR_CQ),
};
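
/*
 * Illustrative sketch of a handler for a shared interrupt line (not the
 * driver's actual ISR): the cause bits above tell the handler which work is
 * pending. Reading PVRDMA_REG_ICR is assumed to report the pending causes;
 * 'regs' and the 'dev' fields are hypothetical.
 *
 *	u32 icr = readl(regs + PVRDMA_REG_ICR);
 *
 *	if (icr & PVRDMA_INTR_CAUSE_RESPONSE)
 *		complete(&dev->cmd_done);		// wake the command poster
 *	if (icr & PVRDMA_INTR_CAUSE_ASYNC)
 *		schedule_work(&dev->async_work);	// drain async event ring
 *	if (icr & PVRDMA_INTR_CAUSE_CQ)
 *		schedule_work(&dev->cq_work);		// drain CQ notification ring
 */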
enum pvrdma_gos_bits {
	PVRDMA_GOS_BITS_UNK,	/* Unknown. */
	PVRDMA_GOS_BITS_32,	/* 32-bit. */
	PVRDMA_GOS_BITS_64,	/* 64-bit. */
};

enum pvrdma_gos_type {
	PVRDMA_GOS_TYPE_UNK,	/* Unknown. */
	PVRDMA_GOS_TYPE_LINUX,	/* Linux. */
};

enum pvrdma_device_mode {
	PVRDMA_DEVICE_MODE_ROCE,	/* RoCE. */
	PVRDMA_DEVICE_MODE_IWARP,	/* iWarp. */
	PVRDMA_DEVICE_MODE_IB,		/* InfiniBand. */
};
struct pvrdma_gos_info {
	u32 gos_bits:2;		/* W: PVRDMA_GOS_BITS_ */
	u32 gos_type:4;		/* W: PVRDMA_GOS_TYPE_ */
	u32 gos_ver:16;		/* W: Guest OS version. */
	u32 gos_misc:10;	/* W: Other. */
	u32 pad;		/* Pad to 8-byte alignment. */
};
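
/*
 * Illustrative initializer for a 64-bit Linux guest; values other than the
 * enum constants are assumptions of this example.
 *
 *	struct pvrdma_gos_info gos = {
 *		.gos_bits = PVRDMA_GOS_BITS_64,
 *		.gos_type = PVRDMA_GOS_TYPE_LINUX,
 *		.gos_ver  = 1,			// hypothetical guest OS version code
 *	};
 */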
struct pvrdma_device_caps {
	u64 fw_ver;				/* R: Query device. */
	__be64 node_guid;
	__be64 sys_image_guid;
	u64 max_mr_size;
	u64 page_size_cap;
	u64 atomic_arg_sizes;			/* EX verbs. */
	u32 ex_comp_mask;			/* EX verbs. */
	u32 device_cap_flags2;			/* EX verbs. */
	u32 max_fa_bit_boundary;		/* EX verbs. */
	u32 log_max_atomic_inline_arg;		/* EX verbs. */
	u32 vendor_id;
	u32 vendor_part_id;
	u32 hw_ver;
	u32 max_qp;
	u32 max_qp_wr;
	u32 device_cap_flags;
	u32 max_sge;
	u32 max_sge_rd;
	u32 max_cq;
	u32 max_cqe;
	u32 max_mr;
	u32 max_pd;
	u32 max_qp_rd_atom;
	u32 max_ee_rd_atom;
	u32 max_res_rd_atom;
	u32 max_qp_init_rd_atom;
	u32 max_ee_init_rd_atom;
	u32 max_ee;
	u32 max_rdd;
	u32 max_mw;
	u32 max_raw_ipv6_qp;
	u32 max_raw_ethy_qp;
	u32 max_mcast_grp;
	u32 max_mcast_qp_attach;
	u32 max_total_mcast_qp_attach;
	u32 max_ah;
	u32 max_fmr;
	u32 max_map_per_fmr;
	u32 max_srq;
	u32 max_srq_wr;
	u32 max_srq_sge;
	u32 max_uar;
	u32 gid_tbl_len;
	u16 max_pkeys;
	u8 local_ca_ack_delay;
	u8 phys_port_cnt;
	u8 mode;				/* PVRDMA_DEVICE_MODE_ */
	u8 atomic_ops;				/* PVRDMA_ATOMIC_OP_* bits */
	u8 bmme_flags;				/* FRWR Mem Mgmt Extensions */
	u8 gid_types;				/* PVRDMA_GID_TYPE_FLAG_ */
	u32 max_fast_reg_page_list_len;
};

struct pvrdma_ring_page_info {
	u32 num_pages;				/* Num pages incl. header. */
	u32 reserved;				/* Reserved. */
	u64 pdir_dma;				/* Page directory PA. */
};
#pragma pack(push, 1)

struct pvrdma_device_shared_region {
	u32 driver_version;			/* W: Driver version. */
	u32 pad;				/* Pad to 8-byte align. */
	struct pvrdma_gos_info gos_info;	/* W: Guest OS information. */
	u64 cmd_slot_dma;			/* W: Command slot address. */
	u64 resp_slot_dma;			/* W: Response slot address. */
	struct pvrdma_ring_page_info async_ring_pages;
						/* W: Async ring page info. */
	struct pvrdma_ring_page_info cq_ring_pages;
						/* W: CQ ring page info. */
	u32 uar_pfn;				/* W: UAR pageframe. */
	u32 pad2;				/* Pad to 8-byte align. */
	struct pvrdma_device_caps caps;		/* R: Device capabilities. */
};

#pragma pack(pop)
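
/*
 * Illustrative setup sketch, not the driver's actual probe path: the guest
 * fills the writable ("W:") fields of a DMA-coherent shared region, hands
 * its physical address to the device through the DSR registers, and then
 * activates the device. 'regs' and the elided slot/ring setup are
 * hypothetical stand-ins.
 *
 *	struct pvrdma_device_shared_region *dsr;
 *	dma_addr_t dsr_dma;
 *
 *	dsr = dma_alloc_coherent(&pdev->dev, sizeof(*dsr), &dsr_dma, GFP_KERNEL);
 *	dsr->driver_version = PVRDMA_VERSION;
 *	dsr->gos_info.gos_bits = PVRDMA_GOS_BITS_64;
 *	dsr->gos_info.gos_type = PVRDMA_GOS_TYPE_LINUX;
 *	// ... fill cmd_slot_dma, resp_slot_dma, ring page info, uar_pfn ...
 *
 *	writel(lower_32_bits(dsr_dma), regs + PVRDMA_REG_DSRLOW);
 *	writel(upper_32_bits(dsr_dma), regs + PVRDMA_REG_DSRHIGH);
 *	writel(PVRDMA_DEVICE_CTL_ACTIVATE, regs + PVRDMA_REG_CTL);
 *	// After activation the read-only ("R:") caps fields are valid to read.
 */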
/* Event types. Currently a 1:1 mapping with enum ib_event. */
enum pvrdma_eqe_type {
	PVRDMA_EVENT_CQ_ERR,
	PVRDMA_EVENT_QP_FATAL,
	PVRDMA_EVENT_QP_REQ_ERR,
	PVRDMA_EVENT_QP_ACCESS_ERR,
	PVRDMA_EVENT_COMM_EST,
	PVRDMA_EVENT_SQ_DRAINED,
	PVRDMA_EVENT_PATH_MIG,
	PVRDMA_EVENT_PATH_MIG_ERR,
	PVRDMA_EVENT_DEVICE_FATAL,
	PVRDMA_EVENT_PORT_ACTIVE,
	PVRDMA_EVENT_PORT_ERR,
	PVRDMA_EVENT_LID_CHANGE,
	PVRDMA_EVENT_PKEY_CHANGE,
	PVRDMA_EVENT_SM_CHANGE,
	PVRDMA_EVENT_SRQ_ERR,
	PVRDMA_EVENT_SRQ_LIMIT_REACHED,
	PVRDMA_EVENT_QP_LAST_WQE_REACHED,
	PVRDMA_EVENT_CLIENT_REREGISTER,
	PVRDMA_EVENT_GID_CHANGE,
};

/* Event queue element. */
struct pvrdma_eqe {
	u32 type;	/* Event type. */
	u32 info;	/* Handle, other. */
};
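
/*
 * Illustrative sketch of turning a port-level event queue element into an
 * IB core event, relying on the 1:1 type mapping noted above. How the
 * affected object is resolved from eqe->info is driver-specific; treating
 * it as a port number here is an assumption of the example, and 'dev' is
 * hypothetical.
 *
 *	struct ib_event ibev = {
 *		.device = &dev->ib_dev,
 *		.event  = (enum ib_event_type)eqe->type,   // valid while the 1:1 mapping holds
 *		.element.port_num = eqe->info,		   // e.g. for PVRDMA_EVENT_PORT_ACTIVE
 *	};
 *	ib_dispatch_event(&ibev);
 */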
/* CQ notification queue element. */
struct pvrdma_cqne {
	u32 info;	/* Handle */
};

enum {
	PVRDMA_CMD_FIRST,
	PVRDMA_CMD_QUERY_PORT = PVRDMA_CMD_FIRST,
	PVRDMA_CMD_QUERY_PKEY,
	PVRDMA_CMD_CREATE_PD,
	PVRDMA_CMD_DESTROY_PD,
	PVRDMA_CMD_CREATE_MR,
	PVRDMA_CMD_DESTROY_MR,
	PVRDMA_CMD_CREATE_CQ,
	PVRDMA_CMD_RESIZE_CQ,
	PVRDMA_CMD_DESTROY_CQ,
	PVRDMA_CMD_CREATE_QP,
	PVRDMA_CMD_MODIFY_QP,
	PVRDMA_CMD_QUERY_QP,
	PVRDMA_CMD_DESTROY_QP,
	PVRDMA_CMD_CREATE_UC,
	PVRDMA_CMD_DESTROY_UC,
	PVRDMA_CMD_CREATE_BIND,
	PVRDMA_CMD_DESTROY_BIND,
	PVRDMA_CMD_MAX,
};

enum {
	PVRDMA_CMD_FIRST_RESP = (1 << 31),
	PVRDMA_CMD_QUERY_PORT_RESP = PVRDMA_CMD_FIRST_RESP,
	PVRDMA_CMD_QUERY_PKEY_RESP,
	PVRDMA_CMD_CREATE_PD_RESP,
	PVRDMA_CMD_DESTROY_PD_RESP_NOOP,
	PVRDMA_CMD_CREATE_MR_RESP,
	PVRDMA_CMD_DESTROY_MR_RESP_NOOP,
	PVRDMA_CMD_CREATE_CQ_RESP,
	PVRDMA_CMD_RESIZE_CQ_RESP,
	PVRDMA_CMD_DESTROY_CQ_RESP_NOOP,
	PVRDMA_CMD_CREATE_QP_RESP,
	PVRDMA_CMD_MODIFY_QP_RESP,
	PVRDMA_CMD_QUERY_QP_RESP,
	PVRDMA_CMD_DESTROY_QP_RESP,
	PVRDMA_CMD_CREATE_UC_RESP,
	PVRDMA_CMD_DESTROY_UC_RESP_NOOP,
	PVRDMA_CMD_CREATE_BIND_RESP_NOOP,
	PVRDMA_CMD_DESTROY_BIND_RESP_NOOP,
	PVRDMA_CMD_MAX_RESP,
};

struct pvrdma_cmd_hdr {
	u64 response;	/* Key for response lookup. */
	u32 cmd;	/* PVRDMA_CMD_ */
	u32 reserved;	/* Reserved. */
};

struct pvrdma_cmd_resp_hdr {
	u64 response;	/* From cmd hdr. */
	u32 ack;	/* PVRDMA_CMD_XXX_RESP */
	u8 err;		/* Error. */
	u8 reserved[3];	/* Reserved. */
};
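
/*
 * Illustrative sketch of the response convention implied by the headers
 * above: the device echoes the 'response' key from the command header, and
 * because the two enums run in the same order, the ack for command N is N
 * with bit 31 set. Checking only ack and err, as below, is an assumption of
 * this example rather than a statement of the full protocol.
 *
 *	bool response_matches(const struct pvrdma_cmd_hdr *cmd,
 *			      const struct pvrdma_cmd_resp_hdr *resp)
 *	{
 *		return resp->response == cmd->response &&
 *		       resp->ack == (cmd->cmd | PVRDMA_CMD_FIRST_RESP) &&
 *		       resp->err == 0;
 *	}
 */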
struct pvrdma_cmd_query_port {
	struct pvrdma_cmd_hdr hdr;
	u8 port_num;
	u8 reserved[7];
};

struct pvrdma_cmd_query_port_resp {
	struct pvrdma_cmd_resp_hdr hdr;
	struct pvrdma_port_attr attrs;
};

struct pvrdma_cmd_query_pkey {
	struct pvrdma_cmd_hdr hdr;
	u8 port_num;
	u8 index;
	u8 reserved[6];
};

struct pvrdma_cmd_query_pkey_resp {
	struct pvrdma_cmd_resp_hdr hdr;
	u16 pkey;
	u8 reserved[6];
};

struct pvrdma_cmd_create_uc {
	struct pvrdma_cmd_hdr hdr;
	u32 pfn;	/* UAR page frame number */
	u8 reserved[4];
};

struct pvrdma_cmd_create_uc_resp {
	struct pvrdma_cmd_resp_hdr hdr;
	u32 ctx_handle;
	u8 reserved[4];
};

struct pvrdma_cmd_destroy_uc {
	struct pvrdma_cmd_hdr hdr;
	u32 ctx_handle;
	u8 reserved[4];
};

struct pvrdma_cmd_create_pd {
	struct pvrdma_cmd_hdr hdr;
	u32 ctx_handle;
	u8 reserved[4];
};

struct pvrdma_cmd_create_pd_resp {
	struct pvrdma_cmd_resp_hdr hdr;
	u32 pd_handle;
	u8 reserved[4];
};

struct pvrdma_cmd_destroy_pd {
	struct pvrdma_cmd_hdr hdr;
	u32 pd_handle;
	u8 reserved[4];
};

struct pvrdma_cmd_create_mr {
	struct pvrdma_cmd_hdr hdr;
	u64 start;
	u64 length;
	u64 pdir_dma;
	u32 pd_handle;
	u32 access_flags;
	u32 flags;
	u32 nchunks;
};

struct pvrdma_cmd_create_mr_resp {
	struct pvrdma_cmd_resp_hdr hdr;
	u32 mr_handle;
	u32 lkey;
	u32 rkey;
	u8 reserved[4];
};

struct pvrdma_cmd_destroy_mr {
	struct pvrdma_cmd_hdr hdr;
	u32 mr_handle;
	u8 reserved[4];
};

struct pvrdma_cmd_create_cq {
	struct pvrdma_cmd_hdr hdr;
	u64 pdir_dma;
	u32 ctx_handle;
	u32 cqe;
	u32 nchunks;
	u8 reserved[4];
};

struct pvrdma_cmd_create_cq_resp {
	struct pvrdma_cmd_resp_hdr hdr;
	u32 cq_handle;
	u32 cqe;
};

struct pvrdma_cmd_resize_cq {
	struct pvrdma_cmd_hdr hdr;
	u32 cq_handle;
	u32 cqe;
};

struct pvrdma_cmd_resize_cq_resp {
	struct pvrdma_cmd_resp_hdr hdr;
	u32 cqe;
	u8 reserved[4];
};

struct pvrdma_cmd_destroy_cq {
	struct pvrdma_cmd_hdr hdr;
	u32 cq_handle;
	u8 reserved[4];
};

struct pvrdma_cmd_create_qp {
	struct pvrdma_cmd_hdr hdr;
	u64 pdir_dma;
	u32 pd_handle;
	u32 send_cq_handle;
	u32 recv_cq_handle;
	u32 srq_handle;
	u32 max_send_wr;
	u32 max_recv_wr;
	u32 max_send_sge;
	u32 max_recv_sge;
	u32 max_inline_data;
	u32 lkey;
	u32 access_flags;
	u16 total_chunks;
	u16 send_chunks;
	u16 max_atomic_arg;
	u8 sq_sig_all;
	u8 qp_type;
	u8 is_srq;
	u8 reserved[3];
};

struct pvrdma_cmd_create_qp_resp {
	struct pvrdma_cmd_resp_hdr hdr;
	u32 qpn;
	u32 max_send_wr;
	u32 max_recv_wr;
	u32 max_send_sge;
	u32 max_recv_sge;
	u32 max_inline_data;
};

struct pvrdma_cmd_modify_qp {
	struct pvrdma_cmd_hdr hdr;
	u32 qp_handle;
	u32 attr_mask;
	struct pvrdma_qp_attr attrs;
};

struct pvrdma_cmd_query_qp {
	struct pvrdma_cmd_hdr hdr;
	u32 qp_handle;
	u32 attr_mask;
};

struct pvrdma_cmd_query_qp_resp {
	struct pvrdma_cmd_resp_hdr hdr;
	struct pvrdma_qp_attr attrs;
};

struct pvrdma_cmd_destroy_qp {
	struct pvrdma_cmd_hdr hdr;
	u32 qp_handle;
	u8 reserved[4];
};

struct pvrdma_cmd_destroy_qp_resp {
	struct pvrdma_cmd_resp_hdr hdr;
	u32 events_reported;
	u8 reserved[4];
};

struct pvrdma_cmd_create_bind {
	struct pvrdma_cmd_hdr hdr;
	u32 mtu;
	u32 vlan;
	u32 index;
	u8 new_gid[16];
	u8 gid_type;
	u8 reserved[3];
};

struct pvrdma_cmd_destroy_bind {
	struct pvrdma_cmd_hdr hdr;
	u32 index;
	u8 dest_gid[16];
	u8 reserved[4];
};

union pvrdma_cmd_req {
	struct pvrdma_cmd_hdr hdr;
	struct pvrdma_cmd_query_port query_port;
	struct pvrdma_cmd_query_pkey query_pkey;
	struct pvrdma_cmd_create_uc create_uc;
	struct pvrdma_cmd_destroy_uc destroy_uc;
	struct pvrdma_cmd_create_pd create_pd;
	struct pvrdma_cmd_destroy_pd destroy_pd;
	struct pvrdma_cmd_create_mr create_mr;
	struct pvrdma_cmd_destroy_mr destroy_mr;
	struct pvrdma_cmd_create_cq create_cq;
	struct pvrdma_cmd_resize_cq resize_cq;
	struct pvrdma_cmd_destroy_cq destroy_cq;
	struct pvrdma_cmd_create_qp create_qp;
	struct pvrdma_cmd_modify_qp modify_qp;
	struct pvrdma_cmd_query_qp query_qp;
	struct pvrdma_cmd_destroy_qp destroy_qp;
	struct pvrdma_cmd_create_bind create_bind;
	struct pvrdma_cmd_destroy_bind destroy_bind;
};

union pvrdma_cmd_resp {
	struct pvrdma_cmd_resp_hdr hdr;
	struct pvrdma_cmd_query_port_resp query_port_resp;
	struct pvrdma_cmd_query_pkey_resp query_pkey_resp;
	struct pvrdma_cmd_create_uc_resp create_uc_resp;
	struct pvrdma_cmd_create_pd_resp create_pd_resp;
	struct pvrdma_cmd_create_mr_resp create_mr_resp;
	struct pvrdma_cmd_create_cq_resp create_cq_resp;
	struct pvrdma_cmd_resize_cq_resp resize_cq_resp;
	struct pvrdma_cmd_create_qp_resp create_qp_resp;
	struct pvrdma_cmd_query_qp_resp query_qp_resp;
	struct pvrdma_cmd_destroy_qp_resp destroy_qp_resp;
};
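
/*
 * Illustrative end-to-end sketch using the request/response unions: build a
 * CREATE_PD command and pull the handle out of the reply. The posting
 * helper 'pvrdma_cmd_post' and the 'dev'/'ctx_handle' variables are
 * hypothetical stand-ins for whatever the driver uses to copy the request
 * into the command slot, ring PVRDMA_REG_REQUEST and wait for the response.
 *
 *	union pvrdma_cmd_req req = {};
 *	union pvrdma_cmd_resp rsp;
 *	int ret;
 *
 *	req.create_pd.hdr.cmd = PVRDMA_CMD_CREATE_PD;
 *	req.create_pd.ctx_handle = ctx_handle;
 *	ret = pvrdma_cmd_post(dev, &req, &rsp, PVRDMA_CMD_CREATE_PD_RESP);
 *	if (!ret)
 *		pd_handle = rsp.create_pd_resp.pd_handle;
 */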
#endif /* __PVRDMA_DEV_API_H__ */